repo_name
string
path
string
copies
string
size
string
content
string
license
string
ztemt/NX511J_kernel
drivers/cpufreq/intel_pstate.c
658
17943
/* * intel_pstate.c: Native P state management for Intel processors * * (C) Copyright 2012 Intel Corporation * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <trace/events/power.h> #include <asm/div64.h> #include <asm/msr.h> #include <asm/cpu_device_id.h> #define SAMPLE_COUNT 3 #define FRAC_BITS 8 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) #define fp_toint(X) ((X) >> FRAC_BITS) static inline int32_t mul_fp(int32_t x, int32_t y) { return ((int64_t)x * (int64_t)y) >> FRAC_BITS; } static inline int32_t div_fp(int32_t x, int32_t y) { return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); } struct sample { int32_t core_pct_busy; u64 aperf; u64 mperf; int freq; }; struct pstate_data { int current_pstate; int min_pstate; int max_pstate; int turbo_pstate; }; struct _pid { int setpoint; int32_t integral; int32_t p_gain; int32_t i_gain; int32_t d_gain; int deadband; int32_t last_err; }; struct cpudata { int cpu; char name[64]; struct timer_list timer; struct pstate_adjust_policy *pstate_policy; struct pstate_data pstate; struct _pid pid; int min_pstate_count; u64 prev_aperf; u64 prev_mperf; int sample_ptr; struct sample samples[SAMPLE_COUNT]; }; static struct cpudata **all_cpu_data; struct pstate_adjust_policy { int sample_rate_ms; int deadband; int setpoint; int p_gain_pct; int d_gain_pct; int i_gain_pct; }; static struct pstate_adjust_policy default_policy = { .sample_rate_ms = 
10, .deadband = 0, .setpoint = 97, .p_gain_pct = 20, .d_gain_pct = 0, .i_gain_pct = 0, }; struct perf_limits { int no_turbo; int max_perf_pct; int min_perf_pct; int32_t max_perf; int32_t min_perf; int max_policy_pct; int max_sysfs_pct; }; static struct perf_limits limits = { .no_turbo = 0, .max_perf_pct = 100, .max_perf = int_tofp(1), .min_perf_pct = 0, .min_perf = 0, .max_policy_pct = 100, .max_sysfs_pct = 100, }; static inline void pid_reset(struct _pid *pid, int setpoint, int busy, int deadband, int integral) { pid->setpoint = setpoint; pid->deadband = deadband; pid->integral = int_tofp(integral); pid->last_err = setpoint - busy; } static inline void pid_p_gain_set(struct _pid *pid, int percent) { pid->p_gain = div_fp(int_tofp(percent), int_tofp(100)); } static inline void pid_i_gain_set(struct _pid *pid, int percent) { pid->i_gain = div_fp(int_tofp(percent), int_tofp(100)); } static inline void pid_d_gain_set(struct _pid *pid, int percent) { pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); } static signed int pid_calc(struct _pid *pid, int32_t busy) { signed int result; int32_t pterm, dterm, fp_error; int32_t integral_limit; fp_error = int_tofp(pid->setpoint) - busy; if (abs(fp_error) <= int_tofp(pid->deadband)) return 0; pterm = mul_fp(pid->p_gain, fp_error); pid->integral += fp_error; /* limit the integral term */ integral_limit = int_tofp(30); if (pid->integral > integral_limit) pid->integral = integral_limit; if (pid->integral < -integral_limit) pid->integral = -integral_limit; dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); pid->last_err = fp_error; result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; return (signed int)fp_toint(result); } static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) { pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct); pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct); pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct); pid_reset(&cpu->pid, cpu->pstate_policy->setpoint, 
100, cpu->pstate_policy->deadband, 0); } static inline void intel_pstate_reset_all_pid(void) { unsigned int cpu; for_each_online_cpu(cpu) { if (all_cpu_data[cpu]) intel_pstate_busy_pid_reset(all_cpu_data[cpu]); } } /************************** debugfs begin ************************/ static int pid_param_set(void *data, u64 val) { *(u32 *)data = val; intel_pstate_reset_all_pid(); return 0; } static int pid_param_get(void *data, u64 *val) { *val = *(u32 *)data; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); struct pid_param { char *name; void *value; }; static struct pid_param pid_files[] = { {"sample_rate_ms", &default_policy.sample_rate_ms}, {"d_gain_pct", &default_policy.d_gain_pct}, {"i_gain_pct", &default_policy.i_gain_pct}, {"deadband", &default_policy.deadband}, {"setpoint", &default_policy.setpoint}, {"p_gain_pct", &default_policy.p_gain_pct}, {NULL, NULL} }; static struct dentry *debugfs_parent; static void intel_pstate_debug_expose_params(void) { int i = 0; debugfs_parent = debugfs_create_dir("pstate_snb", NULL); if (IS_ERR_OR_NULL(debugfs_parent)) return; while (pid_files[i].name) { debugfs_create_file(pid_files[i].name, 0660, debugfs_parent, pid_files[i].value, &fops_pid_param); i++; } } /************************** debugfs end ************************/ /************************** sysfs begin ************************/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", limits.object); \ } static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; limits.no_turbo = clamp_t(int, input, 0 , 1); return count; } static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", 
&input); if (ret != 1) return -EINVAL; limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return count; } static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; limits.min_perf_pct = clamp_t(int, input, 0 , 100); limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); return count; } show_one(no_turbo, no_turbo); show_one(max_perf_pct, max_perf_pct); show_one(min_perf_pct, min_perf_pct); define_one_global_rw(no_turbo); define_one_global_rw(max_perf_pct); define_one_global_rw(min_perf_pct); static struct attribute *intel_pstate_attributes[] = { &no_turbo.attr, &max_perf_pct.attr, &min_perf_pct.attr, NULL }; static struct attribute_group intel_pstate_attr_group = { .attrs = intel_pstate_attributes, }; static struct kobject *intel_pstate_kobject; static void intel_pstate_sysfs_expose_params(void) { int rc; intel_pstate_kobject = kobject_create_and_add("intel_pstate", &cpu_subsys.dev_root->kobj); BUG_ON(!intel_pstate_kobject); rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); BUG_ON(rc); } /************************** sysfs end ************************/ static int intel_pstate_min_pstate(void) { u64 value; rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 40) & 0xFF; } static int intel_pstate_max_pstate(void) { u64 value; rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 8) & 0xFF; } static int intel_pstate_turbo_pstate(void) { u64 value; int nont, ret; rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); nont = intel_pstate_max_pstate(); ret = ((value) & 255); if (ret <= nont) ret = nont; return ret; } static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) { int max_perf = cpu->pstate.turbo_pstate; int max_perf_adj; int min_perf; if 
(limits.no_turbo) max_perf = cpu->pstate.max_pstate; max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) { int max_perf, min_perf; intel_pstate_get_min_max(cpu, &min_perf, &max_perf); pstate = clamp_t(int, pstate, min_perf, max_perf); if (pstate == cpu->pstate.current_pstate) return; trace_cpu_frequency(pstate * 100000, cpu->cpu); cpu->pstate.current_pstate = pstate; if (limits.no_turbo) wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8)); else wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); } static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) { int target; target = cpu->pstate.current_pstate + steps; intel_pstate_set_pstate(cpu, target); } static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps) { int target; target = cpu->pstate.current_pstate - steps; intel_pstate_set_pstate(cpu, target); } static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { sprintf(cpu->name, "Intel 2nd generation core"); cpu->pstate.min_pstate = intel_pstate_min_pstate(); cpu->pstate.max_pstate = intel_pstate_max_pstate(); cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate(); /* * goto max pstate so we don't slow up boot if we are built-in if we are * a module we will take care of it during normal operation */ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); } static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { u64 core_pct; core_pct = div64_u64(int_tofp(sample->aperf * 100), sample->mperf); sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000); sample->core_pct_busy = core_pct; } static inline void intel_pstate_sample(struct cpudata *cpu) { u64 aperf, mperf; rdmsrl(MSR_IA32_APERF, 
aperf); rdmsrl(MSR_IA32_MPERF, mperf); cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; cpu->samples[cpu->sample_ptr].aperf = aperf; cpu->samples[cpu->sample_ptr].mperf = mperf; cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; } static inline void intel_pstate_set_sample_time(struct cpudata *cpu) { int sample_time, delay; sample_time = cpu->pstate_policy->sample_rate_ms; delay = msecs_to_jiffies(sample_time); mod_timer_pinned(&cpu->timer, jiffies + delay); } static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) { int32_t core_busy, max_pstate, current_pstate; core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy; max_pstate = int_tofp(cpu->pstate.max_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate); return mul_fp(core_busy, div_fp(max_pstate, current_pstate)); } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) { int32_t busy_scaled; struct _pid *pid; signed int ctl = 0; int steps; pid = &cpu->pid; busy_scaled = intel_pstate_get_scaled_busy(cpu); ctl = pid_calc(pid, busy_scaled); steps = abs(ctl); if (ctl < 0) intel_pstate_pstate_increase(cpu, steps); else intel_pstate_pstate_decrease(cpu, steps); } static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; intel_pstate_sample(cpu); intel_pstate_adjust_busy_pstate(cpu); if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { cpu->min_pstate_count++; if (!(cpu->min_pstate_count % 5)) { intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); } } else cpu->min_pstate_count = 0; intel_pstate_set_sample_time(cpu); } #define ICPU(model, policy) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ (unsigned long)&policy } static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x2a, default_policy), ICPU(0x2d, default_policy), 
ICPU(0x3a, default_policy), ICPU(0x3c, default_policy), ICPU(0x3e, default_policy), ICPU(0x3f, default_policy), ICPU(0x45, default_policy), ICPU(0x46, default_policy), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); static int intel_pstate_init_cpu(unsigned int cpunum) { const struct x86_cpu_id *id; struct cpudata *cpu; id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) return -ENODEV; all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); if (!all_cpu_data[cpunum]) return -ENOMEM; cpu = all_cpu_data[cpunum]; intel_pstate_get_cpu_pstates(cpu); if (!cpu->pstate.current_pstate) { all_cpu_data[cpunum] = NULL; kfree(cpu); return -ENODATA; } cpu->cpu = cpunum; cpu->pstate_policy = (struct pstate_adjust_policy *)id->driver_data; init_timer_deferrable(&cpu->timer); cpu->timer.function = intel_pstate_timer_func; cpu->timer.data = (unsigned long)cpu; cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); intel_pstate_sample(cpu); intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); add_timer_on(&cpu->timer, cpunum); pr_info("Intel pstate controlling: cpu %d\n", cpunum); return 0; } static unsigned int intel_pstate_get(unsigned int cpu_num) { struct sample *sample; struct cpudata *cpu; cpu = all_cpu_data[cpu_num]; if (!cpu) return 0; sample = &cpu->samples[cpu->sample_ptr]; return sample->freq; } static int intel_pstate_set_policy(struct cpufreq_policy *policy) { struct cpudata *cpu; cpu = all_cpu_data[policy->cpu]; if (!policy->cpuinfo.max_freq) return -ENODEV; if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { limits.min_perf_pct = 100; limits.min_perf = int_tofp(1); limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); limits.no_turbo = 0; return 0; } limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; 
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return 0; } static int intel_pstate_verify_policy(struct cpufreq_policy *policy) { cpufreq_verify_within_cpu_limits(policy); if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) return -EINVAL; return 0; } static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) { int cpu = policy->cpu; del_timer(&all_cpu_data[cpu]->timer); kfree(all_cpu_data[cpu]); all_cpu_data[cpu] = NULL; return 0; } static int intel_pstate_cpu_init(struct cpufreq_policy *policy) { struct cpudata *cpu; int rc; rc = intel_pstate_init_cpu(policy->cpu); if (rc) return rc; cpu = all_cpu_data[policy->cpu]; if (!limits.no_turbo && limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; policy->min = cpu->pstate.min_pstate * 100000; policy->max = cpu->pstate.turbo_pstate * 100000; /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; cpumask_set_cpu(policy->cpu, policy->cpus); return 0; } static struct cpufreq_driver intel_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS, .verify = intel_pstate_verify_policy, .setpolicy = intel_pstate_set_policy, .get = intel_pstate_get, .init = intel_pstate_cpu_init, .exit = intel_pstate_cpu_exit, .name = "intel_pstate", }; static int __initdata no_load; static int intel_pstate_msrs_not_valid(void) { /* Check that all the msr's we are using are valid. 
*/ u64 aperf, mperf, tmp; rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); if (!intel_pstate_min_pstate() || !intel_pstate_max_pstate() || !intel_pstate_turbo_pstate()) return -ENODEV; rdmsrl(MSR_IA32_APERF, tmp); if (!(tmp - aperf)) return -ENODEV; rdmsrl(MSR_IA32_MPERF, tmp); if (!(tmp - mperf)) return -ENODEV; return 0; } static int __init intel_pstate_init(void) { int cpu, rc = 0; const struct x86_cpu_id *id; if (no_load) return -ENODEV; id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) return -ENODEV; if (intel_pstate_msrs_not_valid()) return -ENODEV; pr_info("Intel P-state driver initializing.\n"); all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); if (!all_cpu_data) return -ENOMEM; rc = cpufreq_register_driver(&intel_pstate_driver); if (rc) goto out; intel_pstate_debug_expose_params(); intel_pstate_sysfs_expose_params(); return rc; out: get_online_cpus(); for_each_online_cpu(cpu) { if (all_cpu_data[cpu]) { del_timer_sync(&all_cpu_data[cpu]->timer); kfree(all_cpu_data[cpu]); } } put_online_cpus(); vfree(all_cpu_data); return -ENODEV; } device_initcall(intel_pstate_init); static int __init intel_pstate_setup(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "disable")) no_load = 1; return 0; } early_param("intel_pstate", intel_pstate_setup); MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); MODULE_LICENSE("GPL");
gpl-2.0
jamiethemorris/SPH-L710_Kernel
arch/arm/mach-msm/devices-qsd8x50.c
1170
20556
/* * Copyright (C) 2008 Google, Inc. * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <mach/kgsl.h> #include <linux/dma-mapping.h> #include <asm/clkdev.h> #include <mach/irqs.h> #include <mach/msm_iomap.h> #include <mach/dma.h> #include <mach/board.h> #include "devices.h" #include <asm/mach/flash.h> #include <asm/mach/mmc.h> #include <mach/msm_hsusb.h> #include <mach/usbdiag.h> #include <mach/rpc_hsusb.h> #include "pm.h" struct platform_device msm8x50_device_acpuclk = { .name = "acpuclk-8x50", .id = -1, }; static struct resource resources_uart1[] = { { .start = INT_UART1, .end = INT_UART1, .flags = IORESOURCE_IRQ, }, { .start = MSM_UART1_PHYS, .end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct resource resources_uart2[] = { { .start = INT_UART2, .end = INT_UART2, .flags = IORESOURCE_IRQ, }, { .start = MSM_UART2_PHYS, .end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct resource resources_uart3[] = { { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, { .start = MSM_UART3_PHYS, .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1, .flags = IORESOURCE_MEM, .name = "uart_resource" }, }; struct platform_device msm_device_uart1 = { .name = "msm_serial", .id = 0, .num_resources = ARRAY_SIZE(resources_uart1), .resource = resources_uart1, }; struct platform_device msm_device_uart2 = { .name = "msm_serial", .id = 1, .num_resources = ARRAY_SIZE(resources_uart2), 
.resource = resources_uart2, }; struct platform_device msm_device_uart3 = { .name = "msm_serial", .id = 2, .num_resources = ARRAY_SIZE(resources_uart3), .resource = resources_uart3, }; #define MSM_UART1DM_PHYS 0xA0200000 #define MSM_UART2DM_PHYS 0xA0900000 static struct resource msm_uart1_dm_resources[] = { { .start = MSM_UART1DM_PHYS, .end = MSM_UART1DM_PHYS + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_UART1DM_IRQ, .end = INT_UART1DM_IRQ, .flags = IORESOURCE_IRQ, }, { .start = INT_UART1DM_RX, .end = INT_UART1DM_RX, .flags = IORESOURCE_IRQ, }, { .start = DMOV_HSUART1_TX_CHAN, .end = DMOV_HSUART1_RX_CHAN, .name = "uartdm_channels", .flags = IORESOURCE_DMA, }, { .start = DMOV_HSUART1_TX_CRCI, .end = DMOV_HSUART1_RX_CRCI, .name = "uartdm_crci", .flags = IORESOURCE_DMA, }, }; static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32); struct platform_device msm_device_uart_dm1 = { .name = "msm_serial_hs", .id = 0, .num_resources = ARRAY_SIZE(msm_uart1_dm_resources), .resource = msm_uart1_dm_resources, .dev = { .dma_mask = &msm_uart_dm1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; static struct resource msm_uart2_dm_resources[] = { { .start = MSM_UART2DM_PHYS, .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_UART2DM_IRQ, .end = INT_UART2DM_IRQ, .flags = IORESOURCE_IRQ, }, { .start = INT_UART2DM_RX, .end = INT_UART2DM_RX, .flags = IORESOURCE_IRQ, }, { .start = DMOV_HSUART2_TX_CHAN, .end = DMOV_HSUART2_RX_CHAN, .name = "uartdm_channels", .flags = IORESOURCE_DMA, }, { .start = DMOV_HSUART2_TX_CRCI, .end = DMOV_HSUART2_RX_CRCI, .name = "uartdm_crci", .flags = IORESOURCE_DMA, }, }; static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32); struct platform_device msm_device_uart_dm2 = { .name = "msm_serial_hs", .id = 1, .num_resources = ARRAY_SIZE(msm_uart2_dm_resources), .resource = msm_uart2_dm_resources, .dev = { .dma_mask = &msm_uart_dm2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; #define MSM_I2C_SIZE SZ_4K #define 
MSM_I2C_PHYS 0xA9900000 static struct resource resources_i2c[] = { { .start = MSM_I2C_PHYS, .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_PWB_I2C, .end = INT_PWB_I2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_i2c = { .name = "msm_i2c", .id = 0, .num_resources = ARRAY_SIZE(resources_i2c), .resource = resources_i2c, }; #define MSM_HSUSB_PHYS 0xA0800000 static struct resource resources_hsusb_otg[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; static u64 dma_mask = 0xffffffffULL; struct platform_device msm_device_hsusb_otg = { .name = "msm_hsusb_otg", .id = -1, .num_resources = ARRAY_SIZE(resources_hsusb_otg), .resource = resources_hsusb_otg, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; static struct resource resources_hsusb_peripheral[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; static struct resource resources_gadget_peripheral[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb_peripheral = { .name = "msm_hsusb_peripheral", .id = -1, .num_resources = ARRAY_SIZE(resources_hsusb_peripheral), .resource = resources_hsusb_peripheral, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; struct platform_device msm_device_gadget_peripheral = { .name = "msm_hsusb", .id = -1, .num_resources = ARRAY_SIZE(resources_gadget_peripheral), .resource = resources_gadget_peripheral, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; #ifdef CONFIG_USB_FS_HOST #define MSM_HS2USB_PHYS 0xA0800400 static struct resource resources_hsusb_host2[] = { { .start = 
MSM_HS2USB_PHYS, .end = MSM_HS2USB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_OTG, .end = INT_USB_OTG, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb_host2 = { .name = "msm_hsusb_host", .id = 1, .num_resources = ARRAY_SIZE(resources_hsusb_host2), .resource = resources_hsusb_host2, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; #endif static struct resource resources_hsusb_host[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb_host = { .name = "msm_hsusb_host", .id = 0, .num_resources = ARRAY_SIZE(resources_hsusb_host), .resource = resources_hsusb_host, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; static struct platform_device *msm_host_devices[] = { &msm_device_hsusb_host, #ifdef CONFIG_USB_FS_HOST &msm_device_hsusb_host2, #endif }; int msm_add_host(unsigned int host, struct msm_usb_host_platform_data *plat) { struct platform_device *pdev; pdev = msm_host_devices[host]; if (!pdev) return -ENODEV; pdev->dev.platform_data = plat; return platform_device_register(pdev); } #ifdef CONFIG_USB_ANDROID struct usb_diag_platform_data usb_diag_pdata = { .ch_name = DIAG_LEGACY, .update_pid_and_serial_num = usb_diag_update_pid_and_serial_num, }; struct platform_device usb_diag_device = { .name = "usb_diag", .id = -1, .dev = { .platform_data = &usb_diag_pdata, }, }; #endif #ifdef CONFIG_USB_F_SERIAL static struct usb_gadget_fserial_platform_data fserial_pdata = { .no_ports = 2, }; struct platform_device usb_gadget_fserial_device = { .name = "usb_fserial", .id = -1, .dev = { .platform_data = &fserial_pdata, }, }; #endif #define MSM_NAND_PHYS 0xA0A00000 static struct resource resources_nand[] = { [0] = { .name = "msm_nand_dmac", .start = DMOV_NAND_CHAN, .end = DMOV_NAND_CHAN, .flags = IORESOURCE_DMA, }, [1] = { .name = 
"msm_nand_phys", .start = MSM_NAND_PHYS, .end = MSM_NAND_PHYS + 0x7FF, .flags = IORESOURCE_MEM, }, }; static struct resource resources_otg[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_otg = { .name = "msm_otg", .id = -1, .num_resources = ARRAY_SIZE(resources_otg), .resource = resources_otg, .dev = { .coherent_dma_mask = 0xffffffffULL, }, }; struct flash_platform_data msm_nand_data = { .parts = NULL, .nr_parts = 0, }; struct platform_device msm_device_nand = { .name = "msm_nand", .id = -1, .num_resources = ARRAY_SIZE(resources_nand), .resource = resources_nand, .dev = { .platform_data = &msm_nand_data, }, }; static struct msm_pm_irq_calls qsd8x50_pm_irq_calls = { .irq_pending = msm_irq_pending, .idle_sleep_allowed = msm_irq_idle_sleep_allowed, .enter_sleep1 = msm_irq_enter_sleep1, .enter_sleep2 = msm_irq_enter_sleep2, .exit_sleep1 = msm_irq_exit_sleep1, .exit_sleep2 = msm_irq_exit_sleep2, .exit_sleep3 = msm_irq_exit_sleep3, }; void __init msm_pm_register_irqs(void) { msm_pm_set_irq_extns(&qsd8x50_pm_irq_calls); } struct platform_device msm_device_smd = { .name = "msm_smd", .id = -1, }; static struct resource msm_dmov_resource[] = { { .start = INT_ADM_AARM, .flags = IORESOURCE_IRQ, }, { .start = 0xA9700000, .end = 0xA9700000 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct msm_dmov_pdata msm_dmov_pdata = { .sd = 3, .sd_size = 0x400, }; struct platform_device msm_device_dmov = { .name = "msm_dmov", .id = -1, .resource = msm_dmov_resource, .num_resources = ARRAY_SIZE(msm_dmov_resource), .dev = { .platform_data = &msm_dmov_pdata, }, }; #define MSM_SDC1_BASE 0xA0300000 #define MSM_SDC2_BASE 0xA0400000 #define MSM_SDC3_BASE 0xA0500000 #define MSM_SDC4_BASE 0xA0600000 static struct resource resources_sdc1[] = { { .name = "core_mem", .start = MSM_SDC1_BASE, .end = MSM_SDC1_BASE + SZ_4K - 1, .flags = 
IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC1_0, .end = INT_SDC1_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC1_CHAN, .end = DMOV_SDC1_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC1_CRCI, .end = DMOV_SDC1_CRCI, .flags = IORESOURCE_DMA, } }; static struct resource resources_sdc2[] = { { .name = "core_mem", .start = MSM_SDC2_BASE, .end = MSM_SDC2_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC2_0, .end = INT_SDC2_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC2_CHAN, .end = DMOV_SDC2_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC2_CRCI, .end = DMOV_SDC2_CRCI, .flags = IORESOURCE_DMA, } }; static struct resource resources_sdc3[] = { { .name = "core_mem", .start = MSM_SDC3_BASE, .end = MSM_SDC3_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC3_0, .end = INT_SDC3_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC3_CHAN, .end = DMOV_SDC3_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC3_CRCI, .end = DMOV_SDC3_CRCI, .flags = IORESOURCE_DMA, }, }; static struct resource resources_sdc4[] = { { .name = "core_mem", .start = MSM_SDC4_BASE, .end = MSM_SDC4_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC4_0, .end = INT_SDC4_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC4_CHAN, .end = DMOV_SDC4_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC4_CRCI, .end = DMOV_SDC4_CRCI, .flags = IORESOURCE_DMA, }, }; struct platform_device msm_device_sdc1 = { .name = "msm_sdcc", .id = 1, .num_resources = ARRAY_SIZE(resources_sdc1), .resource = resources_sdc1, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc2 = { .name = "msm_sdcc", .id = 2, .num_resources = ARRAY_SIZE(resources_sdc2), .resource = resources_sdc2, .dev = { .coherent_dma_mask = 
0xffffffff, }, }; struct platform_device msm_device_sdc3 = { .name = "msm_sdcc", .id = 3, .num_resources = ARRAY_SIZE(resources_sdc3), .resource = resources_sdc3, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc4 = { .name = "msm_sdcc", .id = 4, .num_resources = ARRAY_SIZE(resources_sdc4), .resource = resources_sdc4, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static struct platform_device *msm_sdcc_devices[] __initdata = { &msm_device_sdc1, &msm_device_sdc2, &msm_device_sdc3, &msm_device_sdc4, }; int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat) { struct platform_device *pdev; if (controller < 1 || controller > 4) return -EINVAL; pdev = msm_sdcc_devices[controller-1]; pdev->dev.platform_data = plat; return platform_device_register(pdev); } #if defined(CONFIG_FB_MSM_MDP40) #define MDP_BASE 0xA3F00000 #define PMDH_BASE 0xAD600000 #define EMDH_BASE 0xAD700000 #define TVENC_BASE 0xAD400000 #else #define MDP_BASE 0xAA200000 #define PMDH_BASE 0xAA600000 #define EMDH_BASE 0xAA700000 #define TVENC_BASE 0xAA400000 #endif static struct resource msm_mdp_resources[] = { { .name = "mdp", .start = MDP_BASE, .end = MDP_BASE + 0x000F0000 - 1, .flags = IORESOURCE_MEM, }, { .start = INT_MDP, .end = INT_MDP, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_mddi_resources[] = { { .name = "pmdh", .start = PMDH_BASE, .end = PMDH_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct resource msm_mddi_ext_resources[] = { { .name = "emdh", .start = EMDH_BASE, .end = EMDH_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct resource msm_ebi2_lcd_resources[] = { { .name = "base", .start = 0xa0d00000, .end = 0xa0d00000 + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .name = "lcd01", .start = 0x98000000, .end = 0x98000000 + 0x80000 - 1, .flags = IORESOURCE_MEM, }, { .name = "lcd02", .start = 0x9c000000, .end = 0x9c000000 + 0x80000 - 1, .flags = IORESOURCE_MEM, }, }; static struct resource 
msm_tvenc_resources[] = { { .name = "tvenc", .start = TVENC_BASE, .end = TVENC_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device msm_mdp_device = { .name = "mdp", .id = 0, .num_resources = ARRAY_SIZE(msm_mdp_resources), .resource = msm_mdp_resources, }; static struct platform_device msm_mddi_device = { .name = "mddi", .id = 0, .num_resources = ARRAY_SIZE(msm_mddi_resources), .resource = msm_mddi_resources, }; static struct platform_device msm_mddi_ext_device = { .name = "mddi_ext", .id = 0, .num_resources = ARRAY_SIZE(msm_mddi_ext_resources), .resource = msm_mddi_ext_resources, }; static struct platform_device msm_ebi2_lcd_device = { .name = "ebi2_lcd", .id = 0, .num_resources = ARRAY_SIZE(msm_ebi2_lcd_resources), .resource = msm_ebi2_lcd_resources, }; static struct platform_device msm_lcdc_device = { .name = "lcdc", .id = 0, }; static struct platform_device msm_tvenc_device = { .name = "tvenc", .id = 0, .num_resources = ARRAY_SIZE(msm_tvenc_resources), .resource = msm_tvenc_resources, }; #if defined(CONFIG_MSM_SOC_REV_A) #define MSM_QUP_PHYS 0xA1680000 #define MSM_GSBI_QUP_I2C_PHYS 0xA1600000 #define INT_PWB_QUP_ERR INT_GSBI_QUP #else #define MSM_QUP_PHYS 0xA9900000 #define MSM_GSBI_QUP_I2C_PHYS 0xA9900000 #define INT_PWB_QUP_ERR INT_PWB_I2C #endif #define MSM_QUP_SIZE SZ_4K static struct resource resources_qup[] = { { .name = "qup_phys_addr", .start = MSM_QUP_PHYS, .end = MSM_QUP_PHYS + MSM_QUP_SIZE - 1, .flags = IORESOURCE_MEM, }, { .name = "gsbi_qup_i2c_addr", .start = MSM_GSBI_QUP_I2C_PHYS, .end = MSM_GSBI_QUP_I2C_PHYS + 4 - 1, .flags = IORESOURCE_MEM, }, { .name = "qup_err_intr", .start = INT_PWB_QUP_ERR, .end = INT_PWB_QUP_ERR, .flags = IORESOURCE_IRQ, }, }; struct platform_device qup_device_i2c = { .name = "qup_i2c", .id = 4, .num_resources = ARRAY_SIZE(resources_qup), .resource = resources_qup, }; /* TSIF begin */ #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) #define MSM_TSIF_PHYS (0xa0100000) #define MSM_TSIF_SIZE 
(0x200) static struct resource tsif_resources[] = { [0] = { .flags = IORESOURCE_IRQ, .start = INT_TSIF_IRQ, .end = INT_TSIF_IRQ, }, [1] = { .flags = IORESOURCE_MEM, .start = MSM_TSIF_PHYS, .end = MSM_TSIF_PHYS + MSM_TSIF_SIZE - 1, }, [2] = { .flags = IORESOURCE_DMA, .start = DMOV_TSIF_CHAN, .end = DMOV_TSIF_CRCI, }, }; static void tsif_release(struct device *dev) { dev_info(dev, "release\n"); } struct platform_device msm_device_tsif = { .name = "msm_tsif", .id = 0, .num_resources = ARRAY_SIZE(tsif_resources), .resource = tsif_resources, .dev = { .release = tsif_release, }, }; #endif /* defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) */ /* TSIF end */ #define MSM_TSSC_PHYS 0xAA300000 static struct resource resources_tssc[] = { { .start = MSM_TSSC_PHYS, .end = MSM_TSSC_PHYS + SZ_4K - 1, .name = "tssc", .flags = IORESOURCE_MEM, }, { .start = INT_TCHSCRN1, .end = INT_TCHSCRN1, .name = "tssc1", .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, }, { .start = INT_TCHSCRN2, .end = INT_TCHSCRN2, .name = "tssc2", .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, }, }; struct platform_device msm_device_tssc = { .name = "msm_touchscreen", .id = 0, .num_resources = ARRAY_SIZE(resources_tssc), .resource = resources_tssc, }; static void __init msm_register_device(struct platform_device *pdev, void *data) { int ret; pdev->dev.platform_data = data; ret = platform_device_register(pdev); if (ret) dev_err(&pdev->dev, "%s: platform_device_register() failed = %d\n", __func__, ret); } void __init msm_fb_register_device(char *name, void *data) { if (!strncmp(name, "mdp", 3)) msm_register_device(&msm_mdp_device, data); else if (!strncmp(name, "pmdh", 4)) msm_register_device(&msm_mddi_device, data); else if (!strncmp(name, "emdh", 4)) msm_register_device(&msm_mddi_ext_device, data); else if (!strncmp(name, "ebi2", 4)) msm_register_device(&msm_ebi2_lcd_device, data); else if (!strncmp(name, "tvenc", 5)) msm_register_device(&msm_tvenc_device, data); else if (!strncmp(name, "lcdc", 4)) 
msm_register_device(&msm_lcdc_device, data); else printk(KERN_ERR "%s: unknown device! %s\n", __func__, name); } static struct platform_device msm_camera_device = { .name = "msm_camera", .id = 0, }; void __init msm_camera_register_device(void *res, uint32_t num, void *data) { msm_camera_device.num_resources = num; msm_camera_device.resource = res; msm_register_device(&msm_camera_device, data); } static struct resource kgsl_3d0_resources[] = { { .name = KGSL_3D0_REG_MEMORY, .start = 0xA0000000, .end = 0xA001ffff, .flags = IORESOURCE_MEM, }, { .name = KGSL_3D0_IRQ, .start = INT_GRAPHICS, .end = INT_GRAPHICS, .flags = IORESOURCE_IRQ, }, }; static struct kgsl_device_platform_data kgsl_3d0_pdata = { .pwrlevel = { { .gpu_freq = 0, .bus_freq = 128000000, }, }, .init_level = 0, .num_levels = 1, .set_grp_async = NULL, .idle_timeout = HZ/5, .clk_map = KGSL_CLK_CORE | KGSL_CLK_MEM, }; struct platform_device msm_kgsl_3d0 = { .name = "kgsl-3d0", .id = 0, .num_resources = ARRAY_SIZE(kgsl_3d0_resources), .resource = kgsl_3d0_resources, .dev = { .platform_data = &kgsl_3d0_pdata, }, };
gpl-2.0
jforge/linux
drivers/tty/serial/jsm/jsm_cls.c
1170
25122
/* * Copyright 2003 Digi International (www.digi.com) * Scott H Kilau <Scott_Kilau at digi dot com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE! * * This is shared code between Digi's CVS archive and the * Linux Kernel sources. * Changing the source just for reformatting needlessly breaks * our CVS diff history. * * Send any bug fixes/changes to: Eng.Linux at digi dot com. * Thank you. * */ #include <linux/delay.h> /* For udelay */ #include <linux/io.h> /* For read[bwl]/write[bwl] */ #include <linux/serial.h> /* For struct async_serial */ #include <linux/serial_reg.h> /* For the various UART offsets */ #include <linux/pci.h> #include <linux/tty.h> #include "jsm.h" /* Driver main header file */ static struct { unsigned int rate; unsigned int cflag; } baud_rates[] = { { 921600, B921600 }, { 460800, B460800 }, { 230400, B230400 }, { 115200, B115200 }, { 57600, B57600 }, { 38400, B38400 }, { 19200, B19200 }, { 9600, B9600 }, { 4800, B4800 }, { 2400, B2400 }, { 1200, B1200 }, { 600, B600 }, { 300, B300 }, { 200, B200 }, { 150, B150 }, { 134, B134 }, { 110, B110 }, { 75, B75 }, { 50, B50 }, }; static void cls_set_cts_flow_control(struct jsm_channel *ch) { u8 lcrb = readb(&ch->ch_cls_uart->lcr); u8 ier = readb(&ch->ch_cls_uart->ier); u8 isr_fcr = 0; /* * The Enhanced Register Set may only be accessed when * the Line Control Register is set to 0xBFh. 
*/ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); /* Turn on CTS flow control, turn off IXON flow control */ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR); isr_fcr &= ~(UART_EXAR654_EFR_IXON); writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); /* * Enable interrupts for CTS flow, turn off interrupts for * received XOFF chars */ ier |= (UART_EXAR654_IER_CTSDSR); ier &= ~(UART_EXAR654_IER_XOFF); writeb(ier, &ch->ch_cls_uart->ier); /* Set the usual FIFO values */ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 | UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); ch->ch_t_tlevel = 16; } static void cls_set_ixon_flow_control(struct jsm_channel *ch) { u8 lcrb = readb(&ch->ch_cls_uart->lcr); u8 ier = readb(&ch->ch_cls_uart->ier); u8 isr_fcr = 0; /* * The Enhanced Register Set may only be accessed when * the Line Control Register is set to 0xBFh. 
*/ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); /* Turn on IXON flow control, turn off CTS flow control */ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXON); isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR); writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); /* Now set our current start/stop chars while in enhanced mode */ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); writeb(0, &ch->ch_cls_uart->lsr); writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); writeb(0, &ch->ch_cls_uart->spr); /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); /* * Disable interrupts for CTS flow, turn on interrupts for * received XOFF chars */ ier &= ~(UART_EXAR654_IER_CTSDSR); ier |= (UART_EXAR654_IER_XOFF); writeb(ier, &ch->ch_cls_uart->ier); /* Set the usual FIFO values */ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); } static void cls_set_no_output_flow_control(struct jsm_channel *ch) { u8 lcrb = readb(&ch->ch_cls_uart->lcr); u8 ier = readb(&ch->ch_cls_uart->ier); u8 isr_fcr = 0; /* * The Enhanced Register Set may only be accessed when * the Line Control Register is set to 0xBFh. 
*/ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); /* Turn off IXON flow control, turn off CTS flow control */ isr_fcr |= (UART_EXAR654_EFR_ECB); isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR | UART_EXAR654_EFR_IXON); writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); /* * Disable interrupts for CTS flow, turn off interrupts for * received XOFF chars */ ier &= ~(UART_EXAR654_IER_CTSDSR); ier &= ~(UART_EXAR654_IER_XOFF); writeb(ier, &ch->ch_cls_uart->ier); /* Set the usual FIFO values */ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); ch->ch_r_watermark = 0; ch->ch_t_tlevel = 16; ch->ch_r_tlevel = 16; } static void cls_set_rts_flow_control(struct jsm_channel *ch) { u8 lcrb = readb(&ch->ch_cls_uart->lcr); u8 ier = readb(&ch->ch_cls_uart->ier); u8 isr_fcr = 0; /* * The Enhanced Register Set may only be accessed when * the Line Control Register is set to 0xBFh. 
*/ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); /* Turn on RTS flow control, turn off IXOFF flow control */ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_RTSDTR); isr_fcr &= ~(UART_EXAR654_EFR_IXOFF); writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); /* Enable interrupts for RTS flow */ ier |= (UART_EXAR654_IER_RTSDTR); writeb(ier, &ch->ch_cls_uart->ier); /* Set the usual FIFO values */ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 | UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); ch->ch_r_watermark = 4; ch->ch_r_tlevel = 8; } static void cls_set_ixoff_flow_control(struct jsm_channel *ch) { u8 lcrb = readb(&ch->ch_cls_uart->lcr); u8 ier = readb(&ch->ch_cls_uart->ier); u8 isr_fcr = 0; /* * The Enhanced Register Set may only be accessed when * the Line Control Register is set to 0xBFh. 
*/ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); /* Turn on IXOFF flow control, turn off RTS flow control */ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXOFF); isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR); writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); /* Now set our current start/stop chars while in enhanced mode */ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); writeb(0, &ch->ch_cls_uart->lsr); writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); writeb(0, &ch->ch_cls_uart->spr); /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); /* Disable interrupts for RTS flow */ ier &= ~(UART_EXAR654_IER_RTSDTR); writeb(ier, &ch->ch_cls_uart->ier); /* Set the usual FIFO values */ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); } static void cls_set_no_input_flow_control(struct jsm_channel *ch) { u8 lcrb = readb(&ch->ch_cls_uart->lcr); u8 ier = readb(&ch->ch_cls_uart->ier); u8 isr_fcr = 0; /* * The Enhanced Register Set may only be accessed when * the Line Control Register is set to 0xBFh. 
*/ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); /* Turn off IXOFF flow control, turn off RTS flow control */ isr_fcr |= (UART_EXAR654_EFR_ECB); isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR | UART_EXAR654_EFR_IXOFF); writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); /* Write old LCR value back out, which turns enhanced access off */ writeb(lcrb, &ch->ch_cls_uart->lcr); /* Disable interrupts for RTS flow */ ier &= ~(UART_EXAR654_IER_RTSDTR); writeb(ier, &ch->ch_cls_uart->ier); /* Set the usual FIFO values */ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 | UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr); ch->ch_t_tlevel = 16; ch->ch_r_tlevel = 16; } /* * cls_clear_break. * Determines whether its time to shut off break condition. * * No locks are assumed to be held when calling this function. * channel lock is held and released in this function. */ static void cls_clear_break(struct jsm_channel *ch) { unsigned long lock_flags; spin_lock_irqsave(&ch->ch_lock, lock_flags); /* Turn break off, and unset some variables */ if (ch->ch_flags & CH_BREAK_SENDING) { u8 temp = readb(&ch->ch_cls_uart->lcr); writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); ch->ch_flags &= ~(CH_BREAK_SENDING); jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "clear break Finishing UART_LCR_SBC! 
finished: %lx\n", jiffies); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } static void cls_disable_receiver(struct jsm_channel *ch) { u8 tmp = readb(&ch->ch_cls_uart->ier); tmp &= ~(UART_IER_RDI); writeb(tmp, &ch->ch_cls_uart->ier); } static void cls_enable_receiver(struct jsm_channel *ch) { u8 tmp = readb(&ch->ch_cls_uart->ier); tmp |= (UART_IER_RDI); writeb(tmp, &ch->ch_cls_uart->ier); } /* Make the UART raise any of the output signals we want up */ static void cls_assert_modem_signals(struct jsm_channel *ch) { if (!ch) return; writeb(ch->ch_mostat, &ch->ch_cls_uart->mcr); } static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch) { int qleft = 0; u8 linestatus = 0; u8 error_mask = 0; u16 head; u16 tail; unsigned long flags; if (!ch) return; spin_lock_irqsave(&ch->ch_lock, flags); /* cache head and tail of queue */ head = ch->ch_r_head & RQUEUEMASK; tail = ch->ch_r_tail & RQUEUEMASK; /* Get our cached LSR */ linestatus = ch->ch_cached_lsr; ch->ch_cached_lsr = 0; /* Store how much space we have left in the queue */ qleft = tail - head - 1; if (qleft < 0) qleft += RQUEUEMASK + 1; /* * Create a mask to determine whether we should * insert the character (if any) into our queue. */ if (ch->ch_c_iflag & IGNBRK) error_mask |= UART_LSR_BI; while (1) { /* * Grab the linestatus register, we need to * check to see if there is any data to read */ linestatus = readb(&ch->ch_cls_uart->lsr); /* Break out if there is no data to fetch */ if (!(linestatus & UART_LSR_DR)) break; /* * Discard character if we are ignoring the error mask * which in this case is the break signal. */ if (linestatus & error_mask) { u8 discard; linestatus = 0; discard = readb(&ch->ch_cls_uart->txrx); continue; } /* * If our queue is full, we have no choice but to drop some * data. The assumption is that HWFLOW or SWFLOW should have * stopped things way way before we got to this point. * * I decided that I wanted to ditch the oldest data first, * I hope thats okay with everyone? Yes? 
Good. */ while (qleft < 1) { tail = (tail + 1) & RQUEUEMASK; ch->ch_r_tail = tail; ch->ch_err_overrun++; qleft++; } ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE); ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx); qleft--; if (ch->ch_equeue[head] & UART_LSR_PE) ch->ch_err_parity++; if (ch->ch_equeue[head] & UART_LSR_BI) ch->ch_err_break++; if (ch->ch_equeue[head] & UART_LSR_FE) ch->ch_err_frame++; /* Add to, and flip head if needed */ head = (head + 1) & RQUEUEMASK; ch->ch_rxcount++; } /* * Write new final heads to channel structure. */ ch->ch_r_head = head & RQUEUEMASK; ch->ch_e_head = head & EQUEUEMASK; spin_unlock_irqrestore(&ch->ch_lock, flags); } static void cls_copy_data_from_queue_to_uart(struct jsm_channel *ch) { u16 tail; int n; int qlen; u32 len_written = 0; struct circ_buf *circ; if (!ch) return; circ = &ch->uart_port.state->xmit; /* No data to write to the UART */ if (uart_circ_empty(circ)) return; /* If port is "stopped", don't send any data to the UART */ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) return; /* We have to do it this way, because of the EXAR TXFIFO count bug. 
*/ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) return; n = 32; /* cache tail of queue */ tail = circ->tail & (UART_XMIT_SIZE - 1); qlen = uart_circ_chars_pending(circ); /* Find minimum of the FIFO space, versus queue length */ n = min(n, qlen); while (n > 0) { writeb(circ->buf[tail], &ch->ch_cls_uart->txrx); tail = (tail + 1) & (UART_XMIT_SIZE - 1); n--; ch->ch_txcount++; len_written++; } /* Update the final tail */ circ->tail = tail & (UART_XMIT_SIZE - 1); if (len_written > ch->ch_t_tlevel) ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); if (uart_circ_empty(circ)) uart_write_wakeup(&ch->uart_port); } static void cls_parse_modem(struct jsm_channel *ch, u8 signals) { u8 msignals = signals; jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, "neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals); /* * Scrub off lower bits. * They signify delta's, which I don't care about * Keep DDCD and DDSR though */ msignals &= 0xf8; if (msignals & UART_MSR_DDCD) uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); if (msignals & UART_MSR_DDSR) uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_CTS); if (msignals & UART_MSR_DCD) ch->ch_mistat |= UART_MSR_DCD; else ch->ch_mistat &= ~UART_MSR_DCD; if (msignals & UART_MSR_DSR) ch->ch_mistat |= UART_MSR_DSR; else ch->ch_mistat &= ~UART_MSR_DSR; if (msignals & UART_MSR_RI) ch->ch_mistat |= UART_MSR_RI; else ch->ch_mistat &= ~UART_MSR_RI; if (msignals & UART_MSR_CTS) ch->ch_mistat |= UART_MSR_CTS; else ch->ch_mistat &= ~UART_MSR_CTS; jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", ch->ch_portnum, !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); } /* Parse the ISR register for the specific 
port */ static inline void cls_parse_isr(struct jsm_board *brd, uint port) { struct jsm_channel *ch; u8 isr = 0; unsigned long flags; /* * No need to verify board pointer, it was already * verified in the interrupt routine. */ if (port >= brd->nasync) return; ch = brd->channels[port]; if (!ch) return; /* Here we try to figure out what caused the interrupt to happen */ while (1) { isr = readb(&ch->ch_cls_uart->isr_fcr); /* Bail if no pending interrupt on port */ if (isr & UART_IIR_NO_INT) break; /* Receive Interrupt pending */ if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) { /* Read data from uart -> queue */ cls_copy_data_from_uart_to_queue(ch); jsm_check_queue_flow_control(ch); } /* Transmit Hold register empty pending */ if (isr & UART_IIR_THRI) { /* Transfer data (if any) from Write Queue -> UART. */ spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, flags); cls_copy_data_from_queue_to_uart(ch); } /* * CTS/RTS change of state: * Don't need to do anything, the cls_parse_modem * below will grab the updated modem signals. */ /* Parse any modem signal changes */ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); } } /* Channel lock MUST be held before calling this function! */ static void cls_flush_uart_write(struct jsm_channel *ch) { u8 tmp = 0; u8 i = 0; if (!ch) return; writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr); for (i = 0; i < 10; i++) { /* Check to see if the UART feels it completely flushed FIFO */ tmp = readb(&ch->ch_cls_uart->isr_fcr); if (tmp & UART_FCR_CLEAR_XMIT) { jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "Still flushing TX UART... i: %d\n", i); udelay(10); } else break; } ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); } /* Channel lock MUST be held before calling this function! 
*/ static void cls_flush_uart_read(struct jsm_channel *ch) { if (!ch) return; /* * For complete POSIX compatibility, we should be purging the * read FIFO in the UART here. * * However, clearing the read FIFO (UART_FCR_CLEAR_RCVR) also * incorrectly flushes write data as well as just basically trashing the * FIFO. * * Presumably, this is a bug in this UART. */ udelay(10); } static void cls_send_start_character(struct jsm_channel *ch) { if (!ch) return; if (ch->ch_startc != __DISABLED_CHAR) { ch->ch_xon_sends++; writeb(ch->ch_startc, &ch->ch_cls_uart->txrx); } } static void cls_send_stop_character(struct jsm_channel *ch) { if (!ch) return; if (ch->ch_stopc != __DISABLED_CHAR) { ch->ch_xoff_sends++; writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx); } } /* * cls_param() * Send any/all changes to the line to the UART. */ static void cls_param(struct jsm_channel *ch) { u8 lcr = 0; u8 uart_lcr = 0; u8 ier = 0; u32 baud = 9600; int quot = 0; struct jsm_board *bd; int i; unsigned int cflag; bd = ch->ch_bd; if (!bd) return; /* * If baud rate is zero, flush queues, and set mval to drop DTR. */ if ((ch->ch_c_cflag & (CBAUD)) == 0) { ch->ch_r_head = 0; ch->ch_r_tail = 0; ch->ch_e_head = 0; ch->ch_e_tail = 0; cls_flush_uart_write(ch); cls_flush_uart_read(ch); /* The baudrate is B0 so all modem lines are to be dropped. */ ch->ch_flags |= (CH_BAUD0); ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); cls_assert_modem_signals(ch); return; } cflag = C_BAUD(ch->uart_port.state->port.tty); baud = 9600; for (i = 0; i < ARRAY_SIZE(baud_rates); i++) { if (baud_rates[i].cflag == cflag) { baud = baud_rates[i].rate; break; } } if (ch->ch_flags & CH_BAUD0) ch->ch_flags &= ~(CH_BAUD0); if (ch->ch_c_cflag & PARENB) lcr |= UART_LCR_PARITY; if (!(ch->ch_c_cflag & PARODD)) lcr |= UART_LCR_EPAR; /* * Not all platforms support mark/space parity, * so this will hide behind an ifdef. 
*/ #ifdef CMSPAR if (ch->ch_c_cflag & CMSPAR) lcr |= UART_LCR_SPAR; #endif if (ch->ch_c_cflag & CSTOPB) lcr |= UART_LCR_STOP; switch (ch->ch_c_cflag & CSIZE) { case CS5: lcr |= UART_LCR_WLEN5; break; case CS6: lcr |= UART_LCR_WLEN6; break; case CS7: lcr |= UART_LCR_WLEN7; break; case CS8: default: lcr |= UART_LCR_WLEN8; break; } ier = readb(&ch->ch_cls_uart->ier); uart_lcr = readb(&ch->ch_cls_uart->lcr); quot = ch->ch_bd->bd_dividend / baud; if (quot != 0) { writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr); writeb((quot & 0xff), &ch->ch_cls_uart->txrx); writeb((quot >> 8), &ch->ch_cls_uart->ier); writeb(lcr, &ch->ch_cls_uart->lcr); } if (uart_lcr != lcr) writeb(lcr, &ch->ch_cls_uart->lcr); if (ch->ch_c_cflag & CREAD) ier |= (UART_IER_RDI | UART_IER_RLSI); ier |= (UART_IER_THRI | UART_IER_MSI); writeb(ier, &ch->ch_cls_uart->ier); if (ch->ch_c_cflag & CRTSCTS) cls_set_cts_flow_control(ch); else if (ch->ch_c_iflag & IXON) { /* * If start/stop is set to disable, * then we should disable flow control. */ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) cls_set_no_output_flow_control(ch); else cls_set_ixon_flow_control(ch); } else cls_set_no_output_flow_control(ch); if (ch->ch_c_cflag & CRTSCTS) cls_set_rts_flow_control(ch); else if (ch->ch_c_iflag & IXOFF) { /* * If start/stop is set to disable, * then we should disable flow control. */ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) cls_set_no_input_flow_control(ch); else cls_set_ixoff_flow_control(ch); } else cls_set_no_input_flow_control(ch); cls_assert_modem_signals(ch); /* get current status of the modem signals now */ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); } /* * cls_intr() * * Classic specific interrupt handler. */ static irqreturn_t cls_intr(int irq, void *voidbrd) { struct jsm_board *brd = voidbrd; unsigned long lock_flags; unsigned char uart_poll; uint i = 0; /* Lock out the slow poller from running on this board. 
*/
	spin_lock_irqsave(&brd->bd_intr_lock, lock_flags);

	/*
	 * Check the board's global interrupt offset to see if we
	 * actually do have an interrupt pending on us.
	 */
	uart_poll = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET);

	jsm_dbg(INTR, &brd->pci_dev, "%s:%d uart_poll: %x\n",
		__FILE__, __LINE__, uart_poll);

	if (!uart_poll) {
		jsm_dbg(INTR, &brd->pci_dev,
			"Kernel interrupted to me, but no pending interrupts...\n");
		spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags);
		return IRQ_NONE;
	}

	/* At this point, we have at least SOMETHING to service, dig further. */

	/* Parse each port to find out what caused the interrupt */
	for (i = 0; i < brd->nasync; i++)
		cls_parse_isr(brd, i);

	spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags);

	return IRQ_HANDLED;
}

/*
 * Inits UART: enables the FIFOs via the Exar 654 Enhanced Register Set
 * and drains any stale RX/LSR/MSR state so the port starts clean.
 */
static void cls_uart_init(struct jsm_channel *ch)
{
	unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
	unsigned char isr_fcr = 0;

	/* Mask all UART interrupts while reconfiguring. */
	writeb(0, &ch->ch_cls_uart->ier);

	/*
	 * The Enhanced Register Set may only be accessed when
	 * the Line Control Register is set to 0xBFh.
	 */
	writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);

	isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);

	/* Turn on Enhanced/Extended controls */
	isr_fcr |= (UART_EXAR654_EFR_ECB);

	writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);

	/* Write old LCR value back out, which turns enhanced access off */
	writeb(lcrb, &ch->ch_cls_uart->lcr);

	/* Clear out UART and FIFO */
	readb(&ch->ch_cls_uart->txrx);

	writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
		&ch->ch_cls_uart->isr_fcr);
	udelay(10);

	ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);

	/* Discard stale line/modem status so the next reads are fresh. */
	readb(&ch->ch_cls_uart->lsr);
	readb(&ch->ch_cls_uart->msr);
}

/*
 * Turns off UART.
 */
static void cls_uart_off(struct jsm_channel *ch)
{
	/* Stop all interrupts from occurring. */
	writeb(0, &ch->ch_cls_uart->ier);
}

/*
 * cls_get_uarts_bytes_left.
 * Returns 0 if nothing left in the FIFO, returns 1 otherwise.
* * The channel lock MUST be held by the calling function. */ static u32 cls_get_uart_bytes_left(struct jsm_channel *ch) { u8 left = 0; u8 lsr = readb(&ch->ch_cls_uart->lsr); /* Determine whether the Transmitter is empty or not */ if (!(lsr & UART_LSR_TEMT)) left = 1; else { ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); left = 0; } return left; } /* * cls_send_break. * Starts sending a break thru the UART. * * The channel lock MUST be held by the calling function. */ static void cls_send_break(struct jsm_channel *ch) { /* Tell the UART to start sending the break */ if (!(ch->ch_flags & CH_BREAK_SENDING)) { u8 temp = readb(&ch->ch_cls_uart->lcr); writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr); ch->ch_flags |= (CH_BREAK_SENDING); } } /* * cls_send_immediate_char. * Sends a specific character as soon as possible to the UART, * jumping over any bytes that might be in the write queue. * * The channel lock MUST be held by the calling function. */ static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c) { writeb(c, &ch->ch_cls_uart->txrx); } struct board_ops jsm_cls_ops = { .intr = cls_intr, .uart_init = cls_uart_init, .uart_off = cls_uart_off, .param = cls_param, .assert_modem_signals = cls_assert_modem_signals, .flush_uart_write = cls_flush_uart_write, .flush_uart_read = cls_flush_uart_read, .disable_receiver = cls_disable_receiver, .enable_receiver = cls_enable_receiver, .send_break = cls_send_break, .clear_break = cls_clear_break, .send_start_character = cls_send_start_character, .send_stop_character = cls_send_stop_character, .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart, .get_uart_bytes_left = cls_get_uart_bytes_left, .send_immediate_char = cls_send_immediate_char };
gpl-2.0
HurryNwait/kernel-crespo-jellybean
sound/core/jack.c
2450
6261
/* * Jack abstraction layer * * Copyright 2008 Wolfson Microelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/input.h> #include <linux/slab.h> #include <sound/jack.h> #include <sound/core.h> static int jack_switch_types[] = { SW_HEADPHONE_INSERT, SW_MICROPHONE_INSERT, SW_LINEOUT_INSERT, SW_JACK_PHYSICAL_INSERT, SW_VIDEOOUT_INSERT, }; static int snd_jack_dev_free(struct snd_device *device) { struct snd_jack *jack = device->device_data; if (jack->private_free) jack->private_free(jack); /* If the input device is registered with the input subsystem * then we need to use a different deallocator. */ if (jack->registered) input_unregister_device(jack->input_dev); else input_free_device(jack->input_dev); kfree(jack->id); kfree(jack); return 0; } static int snd_jack_dev_register(struct snd_device *device) { struct snd_jack *jack = device->device_data; struct snd_card *card = device->card; int err, i; snprintf(jack->name, sizeof(jack->name), "%s %s", card->shortname, jack->id); jack->input_dev->name = jack->name; /* Default to the sound card device. 
*/ if (!jack->input_dev->dev.parent) jack->input_dev->dev.parent = snd_card_get_device_link(card); /* Add capabilities for any keys that are enabled */ for (i = 0; i < ARRAY_SIZE(jack->key); i++) { int testbit = SND_JACK_BTN_0 >> i; if (!(jack->type & testbit)) continue; if (!jack->key[i]) jack->key[i] = BTN_0 + i; input_set_capability(jack->input_dev, EV_KEY, jack->key[i]); } err = input_register_device(jack->input_dev); if (err == 0) jack->registered = 1; return err; } /** * snd_jack_new - Create a new jack * @card: the card instance * @id: an identifying string for this jack * @type: a bitmask of enum snd_jack_type values that can be detected by * this jack * @jjack: Used to provide the allocated jack object to the caller. * * Creates a new jack object. * * Returns zero if successful, or a negative error code on failure. * On success jjack will be initialised. */ int snd_jack_new(struct snd_card *card, const char *id, int type, struct snd_jack **jjack) { struct snd_jack *jack; int err; int i; static struct snd_device_ops ops = { .dev_free = snd_jack_dev_free, .dev_register = snd_jack_dev_register, }; jack = kzalloc(sizeof(struct snd_jack), GFP_KERNEL); if (jack == NULL) return -ENOMEM; jack->id = kstrdup(id, GFP_KERNEL); jack->input_dev = input_allocate_device(); if (jack->input_dev == NULL) { err = -ENOMEM; goto fail_input; } jack->input_dev->phys = "ALSA"; jack->type = type; for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) if (type & (1 << i)) input_set_capability(jack->input_dev, EV_SW, jack_switch_types[i]); err = snd_device_new(card, SNDRV_DEV_JACK, jack, &ops); if (err < 0) goto fail_input; *jjack = jack; return 0; fail_input: input_free_device(jack->input_dev); kfree(jack->id); kfree(jack); return err; } EXPORT_SYMBOL(snd_jack_new); /** * snd_jack_set_parent - Set the parent device for a jack * * @jack: The jack to configure * @parent: The device to set as parent for the jack. * * Set the parent for the jack input device in the device tree. 
This
 * function is only valid prior to registration of the jack.  If no
 * parent is configured then the parent device will be the sound card.
 */
void snd_jack_set_parent(struct snd_jack *jack, struct device *parent)
{
	/* Too late once the input device is registered. */
	WARN_ON(jack->registered);

	jack->input_dev->dev.parent = parent;
}
EXPORT_SYMBOL(snd_jack_set_parent);

/**
 * snd_jack_set_key - Set a key mapping on a jack
 *
 * @jack:    The jack to configure
 * @type:    Jack report type for this key
 * @keytype: Input layer key type to be reported
 *
 * Map a SND_JACK_BTN_ button type to an input layer key, allowing
 * reporting of keys on accessories via the jack abstraction.  If no
 * mapping is provided but keys are enabled in the jack type then
 * BTN_n numeric buttons will be reported.
 *
 * Note that this is intended to be used by simple devices with small
 * numbers of keys that can be reported.  It is also possible to
 * access the input device directly - devices with complex input
 * capabilities on accessories should consider doing this rather than
 * using this abstraction.
 *
 * This function may only be called prior to registration of the jack.
*/
int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
		     int keytype)
{
	/*
	 * Button bits count down from SND_JACK_BTN_0, so the difference of
	 * the two fls() values is the index into jack->key[].  A zero or
	 * out-of-range type makes key land outside the array and is
	 * rejected by the bounds check below.
	 */
	int key = fls(SND_JACK_BTN_0) - fls(type);

	WARN_ON(jack->registered);

	if (!keytype || key >= ARRAY_SIZE(jack->key))
		return -EINVAL;

	jack->type |= type;
	jack->key[key] = keytype;

	return 0;
}
EXPORT_SYMBOL(snd_jack_set_key);

/**
 * snd_jack_report - Report the current status of a jack
 *
 * @jack:   The jack to report status for
 * @status: The current status of the jack
 */
void snd_jack_report(struct snd_jack *jack, int status)
{
	int i;

	/* Callers may pass a NULL jack when detection is disabled. */
	if (!jack)
		return;

	/* Report enabled button states as key events... */
	for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
		int testbit = SND_JACK_BTN_0 >> i;

		if (jack->type & testbit)
			input_report_key(jack->input_dev, jack->key[i],
					 status & testbit);
	}

	/* ...and enabled jack types as switch events. */
	for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) {
		int testbit = 1 << i;

		if (jack->type & testbit)
			input_report_switch(jack->input_dev,
					    jack_switch_types[i],
					    status & testbit);
	}

	input_sync(jack->input_dev);
}
EXPORT_SYMBOL(snd_jack_report);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Jack detection support for ALSA");
MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_lge_v500
drivers/usb/core/sysfs.c
3218
24196
/* * drivers/usb/core/sysfs.c * * (C) Copyright 2002 David Brownell * (C) Copyright 2002,2004 Greg Kroah-Hartman * (C) Copyright 2002,2004 IBM Corp. * * All of the sysfs file attributes for usb devices and interfaces. * */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usb/quirks.h> #include "usb.h" /* Active configuration fields */ #define usb_actconfig_show(field, multiplier, format_string) \ static ssize_t show_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_device *udev; \ struct usb_host_config *actconfig; \ \ udev = to_usb_device(dev); \ actconfig = udev->actconfig; \ if (actconfig) \ return sprintf(buf, format_string, \ actconfig->desc.field * multiplier); \ else \ return 0; \ } \ #define usb_actconfig_attr(field, multiplier, format_string) \ usb_actconfig_show(field, multiplier, format_string) \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_actconfig_attr(bNumInterfaces, 1, "%2d\n") usb_actconfig_attr(bmAttributes, 1, "%2x\n") usb_actconfig_attr(bMaxPower, 2, "%3dmA\n") static ssize_t show_configuration_string(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; struct usb_host_config *actconfig; udev = to_usb_device(dev); actconfig = udev->actconfig; if ((!actconfig) || (!actconfig->string)) return 0; return sprintf(buf, "%s\n", actconfig->string); } static DEVICE_ATTR(configuration, S_IRUGO, show_configuration_string, NULL); /* configuration value is always present, and r/w */ usb_actconfig_show(bConfigurationValue, 1, "%u\n"); static ssize_t set_bConfigurationValue(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); int config, value; if (sscanf(buf, "%d", &config) != 1 || config < -1 || config > 255) return -EINVAL; usb_lock_device(udev); value = usb_set_configuration(udev, config); usb_unlock_device(udev); return (value < 0) ? 
value : count; } static DEVICE_ATTR(bConfigurationValue, S_IRUGO | S_IWUSR, show_bConfigurationValue, set_bConfigurationValue); /* String fields */ #define usb_string_attr(name) \ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_device *udev; \ int retval; \ \ udev = to_usb_device(dev); \ usb_lock_device(udev); \ retval = sprintf(buf, "%s\n", udev->name); \ usb_unlock_device(udev); \ return retval; \ } \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); usb_string_attr(product); usb_string_attr(manufacturer); usb_string_attr(serial); static ssize_t show_speed(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; char *speed; udev = to_usb_device(dev); switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; case USB_SPEED_WIRELESS: speed = "480"; break; case USB_SPEED_SUPER: speed = "5000"; break; default: speed = "unknown"; } return sprintf(buf, "%s\n", speed); } static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); static ssize_t show_busnum(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "%d\n", udev->bus->busnum); } static DEVICE_ATTR(busnum, S_IRUGO, show_busnum, NULL); static ssize_t show_devnum(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "%d\n", udev->devnum); } static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); static ssize_t show_devpath(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "%s\n", udev->devpath); } static DEVICE_ATTR(devpath, S_IRUGO, show_devpath, NULL); static ssize_t show_version(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device 
*udev; u16 bcdUSB; udev = to_usb_device(dev); bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB); return sprintf(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff); } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_maxchild(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "%d\n", udev->maxchild); } static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL); static ssize_t show_quirks(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "0x%x\n", udev->quirks); } static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL); static ssize_t show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS)); } static ssize_t set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); int config; if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1) return -EINVAL; usb_lock_device(udev); if (config) udev->quirks |= USB_QUIRK_RESET_MORPHS; else udev->quirks &= ~USB_QUIRK_RESET_MORPHS; usb_unlock_device(udev); return count; } static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR, show_avoid_reset_quirk, set_avoid_reset_quirk); static ssize_t show_urbnum(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; udev = to_usb_device(dev); return sprintf(buf, "%d\n", atomic_read(&udev->urbnum)); } static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL); static ssize_t show_removable(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; char *state; udev = to_usb_device(dev); switch (udev->removable) { case USB_DEVICE_REMOVABLE: state = "removable"; break; case USB_DEVICE_FIXED: state = 
"fixed"; break; default: state = "unknown"; } return sprintf(buf, "%s\n", state); } static DEVICE_ATTR(removable, S_IRUGO, show_removable, NULL); #ifdef CONFIG_PM static ssize_t show_persist(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); return sprintf(buf, "%d\n", udev->persist_enabled); } static ssize_t set_persist(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); int value; /* Hubs are always enabled for USB_PERSIST */ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) return -EPERM; if (sscanf(buf, "%d", &value) != 1) return -EINVAL; usb_lock_device(udev); udev->persist_enabled = !!value; usb_unlock_device(udev); return count; } static DEVICE_ATTR(persist, S_IRUGO | S_IWUSR, show_persist, set_persist); static int add_persist_attributes(struct device *dev) { int rc = 0; if (is_usb_device(dev)) { struct usb_device *udev = to_usb_device(dev); /* Hubs are automatically enabled for USB_PERSIST, * no point in creating the attribute file. */ if (udev->descriptor.bDeviceClass != USB_CLASS_HUB) rc = sysfs_add_file_to_group(&dev->kobj, &dev_attr_persist.attr, power_group_name); } return rc; } static void remove_persist_attributes(struct device *dev) { sysfs_remove_file_from_group(&dev->kobj, &dev_attr_persist.attr, power_group_name); } #else #define add_persist_attributes(dev) 0 #define remove_persist_attributes(dev) do {} while (0) #endif /* CONFIG_PM */ #ifdef CONFIG_USB_SUSPEND static ssize_t show_connected_duration(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); return sprintf(buf, "%u\n", jiffies_to_msecs(jiffies - udev->connect_time)); } static DEVICE_ATTR(connected_duration, S_IRUGO, show_connected_duration, NULL); /* * If the device is resumed, the last time the device was suspended has * been pre-subtracted from active_duration. 
We add the current time to * get the duration that the device was actually active. * * If the device is suspended, the active_duration is up-to-date. */ static ssize_t show_active_duration(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); int duration; if (udev->state != USB_STATE_SUSPENDED) duration = jiffies_to_msecs(jiffies + udev->active_duration); else duration = jiffies_to_msecs(udev->active_duration); return sprintf(buf, "%u\n", duration); } static DEVICE_ATTR(active_duration, S_IRUGO, show_active_duration, NULL); static ssize_t show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", dev->power.autosuspend_delay / 1000); } static ssize_t set_autosuspend(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/1000 || value <= -INT_MAX/1000) return -EINVAL; pm_runtime_set_autosuspend_delay(dev, value * 1000); return count; } static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR, show_autosuspend, set_autosuspend); static const char on_string[] = "on"; static const char auto_string[] = "auto"; static void warn_level(void) { static int level_warned; if (!level_warned) { level_warned = 1; printk(KERN_WARNING "WARNING! 
power/level is deprecated; " "use power/control instead\n"); } } static ssize_t show_level(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); const char *p = auto_string; warn_level(); if (udev->state != USB_STATE_SUSPENDED && !udev->dev.power.runtime_auto) p = on_string; return sprintf(buf, "%s\n", p); } static ssize_t set_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); int len = count; char *cp; int rc = count; warn_level(); cp = memchr(buf, '\n', count); if (cp) len = cp - buf; usb_lock_device(udev); if (len == sizeof on_string - 1 && strncmp(buf, on_string, len) == 0) usb_disable_autosuspend(udev); else if (len == sizeof auto_string - 1 && strncmp(buf, auto_string, len) == 0) usb_enable_autosuspend(udev); else rc = -EINVAL; usb_unlock_device(udev); return rc; } static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level); static ssize_t show_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); const char *p; if (udev->usb2_hw_lpm_enabled == 1) p = "enabled"; else p = "disabled"; return sprintf(buf, "%s\n", p); } static ssize_t set_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); bool value; int ret; usb_lock_device(udev); ret = strtobool(buf, &value); if (!ret) ret = usb_set_usb2_hardware_lpm(udev, value); usb_unlock_device(udev); if (!ret) return count; return ret; } static DEVICE_ATTR(usb2_hardware_lpm, S_IRUGO | S_IWUSR, show_usb2_hardware_lpm, set_usb2_hardware_lpm); static struct attribute *usb2_hardware_lpm_attr[] = { &dev_attr_usb2_hardware_lpm.attr, NULL, }; static struct attribute_group usb2_hardware_lpm_attr_group = { .name = power_group_name, .attrs = usb2_hardware_lpm_attr, }; static struct attribute *power_attrs[] = { 
&dev_attr_autosuspend.attr, &dev_attr_level.attr, &dev_attr_connected_duration.attr, &dev_attr_active_duration.attr, NULL, }; static struct attribute_group power_attr_group = { .name = power_group_name, .attrs = power_attrs, }; static int add_power_attributes(struct device *dev) { int rc = 0; if (is_usb_device(dev)) { struct usb_device *udev = to_usb_device(dev); rc = sysfs_merge_group(&dev->kobj, &power_attr_group); if (udev->usb2_hw_lpm_capable == 1) rc = sysfs_merge_group(&dev->kobj, &usb2_hardware_lpm_attr_group); } return rc; } static void remove_power_attributes(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group); sysfs_unmerge_group(&dev->kobj, &power_attr_group); } #else #define add_power_attributes(dev) 0 #define remove_power_attributes(dev) do {} while (0) #endif /* CONFIG_USB_SUSPEND */ /* Descriptor fields */ #define usb_descriptor_attr_le16(field, format_string) \ static ssize_t \ show_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct usb_device *udev; \ \ udev = to_usb_device(dev); \ return sprintf(buf, format_string, \ le16_to_cpu(udev->descriptor.field)); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_descriptor_attr_le16(idVendor, "%04x\n") usb_descriptor_attr_le16(idProduct, "%04x\n") usb_descriptor_attr_le16(bcdDevice, "%04x\n") #define usb_descriptor_attr(field, format_string) \ static ssize_t \ show_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct usb_device *udev; \ \ udev = to_usb_device(dev); \ return sprintf(buf, format_string, udev->descriptor.field); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); usb_descriptor_attr(bDeviceClass, "%02x\n") usb_descriptor_attr(bDeviceSubClass, "%02x\n") usb_descriptor_attr(bDeviceProtocol, "%02x\n") usb_descriptor_attr(bNumConfigurations, "%d\n") usb_descriptor_attr(bMaxPacketSize0, "%d\n") /* show if the device is authorized (1) or not (0) */ static ssize_t 
usb_dev_authorized_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *usb_dev = to_usb_device(dev); return snprintf(buf, PAGE_SIZE, "%u\n", usb_dev->authorized); } /* * Authorize a device to be used in the system * * Writing a 0 deauthorizes the device, writing a 1 authorizes it. */ static ssize_t usb_dev_authorized_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { ssize_t result; struct usb_device *usb_dev = to_usb_device(dev); unsigned val; result = sscanf(buf, "%u\n", &val); if (result != 1) result = -EINVAL; else if (val == 0) result = usb_deauthorize_device(usb_dev); else result = usb_authorize_device(usb_dev); return result < 0? result : size; } static DEVICE_ATTR(authorized, 0644, usb_dev_authorized_show, usb_dev_authorized_store); /* "Safely remove a device" */ static ssize_t usb_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); int rc = 0; usb_lock_device(udev); if (udev->state != USB_STATE_NOTATTACHED) { /* To avoid races, first unconfigure and then remove */ usb_set_configuration(udev, -1); rc = usb_remove_device(udev); } if (rc == 0) rc = count; usb_unlock_device(udev); return rc; } static DEVICE_ATTR(remove, 0200, NULL, usb_remove_store); static struct attribute *dev_attrs[] = { /* current configuration's attributes */ &dev_attr_configuration.attr, &dev_attr_bNumInterfaces.attr, &dev_attr_bConfigurationValue.attr, &dev_attr_bmAttributes.attr, &dev_attr_bMaxPower.attr, /* device attributes */ &dev_attr_urbnum.attr, &dev_attr_idVendor.attr, &dev_attr_idProduct.attr, &dev_attr_bcdDevice.attr, &dev_attr_bDeviceClass.attr, &dev_attr_bDeviceSubClass.attr, &dev_attr_bDeviceProtocol.attr, &dev_attr_bNumConfigurations.attr, &dev_attr_bMaxPacketSize0.attr, &dev_attr_speed.attr, &dev_attr_busnum.attr, &dev_attr_devnum.attr, &dev_attr_devpath.attr, &dev_attr_version.attr, &dev_attr_maxchild.attr, 
&dev_attr_quirks.attr, &dev_attr_avoid_reset_quirk.attr, &dev_attr_authorized.attr, &dev_attr_remove.attr, &dev_attr_removable.attr, NULL, }; static struct attribute_group dev_attr_grp = { .attrs = dev_attrs, }; /* When modifying this list, be sure to modify dev_string_attrs_are_visible() * accordingly. */ static struct attribute *dev_string_attrs[] = { &dev_attr_manufacturer.attr, &dev_attr_product.attr, &dev_attr_serial.attr, NULL }; static umode_t dev_string_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct usb_device *udev = to_usb_device(dev); if (a == &dev_attr_manufacturer.attr) { if (udev->manufacturer == NULL) return 0; } else if (a == &dev_attr_product.attr) { if (udev->product == NULL) return 0; } else if (a == &dev_attr_serial.attr) { if (udev->serial == NULL) return 0; } return a->mode; } static struct attribute_group dev_string_attr_grp = { .attrs = dev_string_attrs, .is_visible = dev_string_attrs_are_visible, }; const struct attribute_group *usb_device_groups[] = { &dev_attr_grp, &dev_string_attr_grp, NULL }; /* Binary descriptors */ static ssize_t read_descriptors(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct usb_device *udev = to_usb_device(dev); size_t nleft = count; size_t srclen, n; int cfgno; void *src; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). */ for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations && nleft > 0; ++cfgno) { if (cfgno < 0) { src = &udev->descriptor; srclen = sizeof(struct usb_device_descriptor); } else { src = udev->rawdescriptors[cfgno]; srclen = __le16_to_cpu(udev->config[cfgno].desc. 
wTotalLength); } if (off < srclen) { n = min(nleft, srclen - (size_t) off); memcpy(buf, src + off, n); nleft -= n; buf += n; off = 0; } else { off -= srclen; } } return count - nleft; } static struct bin_attribute dev_bin_attr_descriptors = { .attr = {.name = "descriptors", .mode = 0444}, .read = read_descriptors, .size = 18 + 65535, /* dev descr + max-size raw descriptor */ }; int usb_create_sysfs_dev_files(struct usb_device *udev) { struct device *dev = &udev->dev; int retval; retval = device_create_bin_file(dev, &dev_bin_attr_descriptors); if (retval) goto error; retval = add_persist_attributes(dev); if (retval) goto error; retval = add_power_attributes(dev); if (retval) goto error; return retval; error: usb_remove_sysfs_dev_files(udev); return retval; } void usb_remove_sysfs_dev_files(struct usb_device *udev) { struct device *dev = &udev->dev; remove_power_attributes(dev); remove_persist_attributes(dev); device_remove_bin_file(dev, &dev_bin_attr_descriptors); } /* Interface Accociation Descriptor fields */ #define usb_intf_assoc_attr(field, format_string) \ static ssize_t \ show_iad_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct usb_interface *intf = to_usb_interface(dev); \ \ return sprintf(buf, format_string, \ intf->intf_assoc->field); \ } \ static DEVICE_ATTR(iad_##field, S_IRUGO, show_iad_##field, NULL); usb_intf_assoc_attr(bFirstInterface, "%02x\n") usb_intf_assoc_attr(bInterfaceCount, "%02d\n") usb_intf_assoc_attr(bFunctionClass, "%02x\n") usb_intf_assoc_attr(bFunctionSubClass, "%02x\n") usb_intf_assoc_attr(bFunctionProtocol, "%02x\n") /* Interface fields */ #define usb_intf_attr(field, format_string) \ static ssize_t \ show_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct usb_interface *intf = to_usb_interface(dev); \ \ return sprintf(buf, format_string, \ intf->cur_altsetting->desc.field); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 
usb_intf_attr(bInterfaceNumber, "%02x\n") usb_intf_attr(bAlternateSetting, "%2d\n") usb_intf_attr(bNumEndpoints, "%02x\n") usb_intf_attr(bInterfaceClass, "%02x\n") usb_intf_attr(bInterfaceSubClass, "%02x\n") usb_intf_attr(bInterfaceProtocol, "%02x\n") static ssize_t show_interface_string(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf; char *string; intf = to_usb_interface(dev); string = intf->cur_altsetting->string; barrier(); /* The altsetting might change! */ if (!string) return 0; return sprintf(buf, "%s\n", string); } static DEVICE_ATTR(interface, S_IRUGO, show_interface_string, NULL); static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf; struct usb_device *udev; struct usb_host_interface *alt; intf = to_usb_interface(dev); udev = interface_to_usbdev(intf); alt = intf->cur_altsetting; return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" "ic%02Xisc%02Xip%02Xin%02X\n", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), le16_to_cpu(udev->descriptor.bcdDevice), udev->descriptor.bDeviceClass, udev->descriptor.bDeviceSubClass, udev->descriptor.bDeviceProtocol, alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol, alt->desc.bInterfaceNumber); } static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL); static ssize_t show_supports_autosuspend(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf; struct usb_device *udev; int ret; intf = to_usb_interface(dev); udev = interface_to_usbdev(intf); usb_lock_device(udev); /* Devices will be autosuspended even when an interface isn't claimed */ if (!intf->dev.driver || to_usb_driver(intf->dev.driver)->supports_autosuspend) ret = sprintf(buf, "%u\n", 1); else ret = sprintf(buf, "%u\n", 0); usb_unlock_device(udev); return ret; } static DEVICE_ATTR(supports_autosuspend, S_IRUGO, show_supports_autosuspend, NULL); 
static struct attribute *intf_attrs[] = { &dev_attr_bInterfaceNumber.attr, &dev_attr_bAlternateSetting.attr, &dev_attr_bNumEndpoints.attr, &dev_attr_bInterfaceClass.attr, &dev_attr_bInterfaceSubClass.attr, &dev_attr_bInterfaceProtocol.attr, &dev_attr_modalias.attr, &dev_attr_supports_autosuspend.attr, NULL, }; static struct attribute_group intf_attr_grp = { .attrs = intf_attrs, }; static struct attribute *intf_assoc_attrs[] = { &dev_attr_iad_bFirstInterface.attr, &dev_attr_iad_bInterfaceCount.attr, &dev_attr_iad_bFunctionClass.attr, &dev_attr_iad_bFunctionSubClass.attr, &dev_attr_iad_bFunctionProtocol.attr, NULL, }; static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct usb_interface *intf = to_usb_interface(dev); if (intf->intf_assoc == NULL) return 0; return a->mode; } static struct attribute_group intf_assoc_attr_grp = { .attrs = intf_assoc_attrs, .is_visible = intf_assoc_attrs_are_visible, }; const struct attribute_group *usb_interface_groups[] = { &intf_attr_grp, &intf_assoc_attr_grp, NULL }; void usb_create_sysfs_intf_files(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt = intf->cur_altsetting; if (intf->sysfs_files_created || intf->unregistering) return; if (!alt->string && !(udev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) alt->string = usb_cache_string(udev, alt->desc.iInterface); if (alt->string && device_create_file(&intf->dev, &dev_attr_interface)) ; /* We don't actually care if the function fails. */ intf->sysfs_files_created = 1; } void usb_remove_sysfs_intf_files(struct usb_interface *intf) { if (!intf->sysfs_files_created) return; device_remove_file(&intf->dev, &dev_attr_interface); intf->sysfs_files_created = 0; }
gpl-2.0
loglud/acclaim_kernel
arch/powerpc/platforms/embedded6xx/storcenter.c
4498
3353
/* * Board setup routines for the storcenter * * Copyright 2007 (C) Oyvind Repvik (nail@nslu2-linux.org) * Copyright 2007 Andy Wilcox, Jon Loeliger * * Based on linkstation.c by G. Liakhovetski * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of * any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/initrd.h> #include <linux/of_platform.h> #include <asm/system.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/mpic.h> #include <asm/pci-bridge.h> #include "mpc10x.h" static __initdata struct of_device_id storcenter_of_bus[] = { { .name = "soc", }, {}, }; static int __init storcenter_device_probe(void) { of_platform_bus_probe(NULL, storcenter_of_bus, NULL); return 0; } machine_device_initcall(storcenter, storcenter_device_probe); static int __init storcenter_add_bridge(struct device_node *dev) { #ifdef CONFIG_PCI int len; struct pci_controller *hose; const int *bus_range; printk("Adding PCI host bridge %s\n", dev->full_name); hose = pcibios_alloc_controller(dev); if (hose == NULL) return -ENOMEM; bus_range = of_get_property(dev, "bus-range", &len); hose->first_busno = bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, MPC10X_MAPB_CNFG_ADDR, MPC10X_MAPB_CNFG_DATA, 0); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, 1); #endif return 0; } static void __init storcenter_setup_arch(void) { struct device_node *np; /* Lookup PCI host bridges */ for_each_compatible_node(np, "pci", "mpc10x-pci") storcenter_add_bridge(np); printk(KERN_INFO "IOMEGA StorCenter\n"); } /* * Interrupt setup and service. Interrrupts on the turbostation come * from the four PCI slots plus onboard 8241 devices: I2C, DUART. 
*/ static void __init storcenter_init_IRQ(void) { struct mpic *mpic; struct device_node *dnp; const void *prop; int size; phys_addr_t paddr; dnp = of_find_node_by_type(NULL, "open-pic"); if (dnp == NULL) return; prop = of_get_property(dnp, "reg", &size); if (prop == NULL) { of_node_put(dnp); return; } paddr = (phys_addr_t)of_translate_address(dnp, prop); mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 16, 32, " OpenPIC "); of_node_put(dnp); BUG_ON(mpic == NULL); /* * 16 Serial Interrupts followed by 16 Internal Interrupts. * I2C is the second internal, so it is at 17, 0x11020. */ mpic_assign_isu(mpic, 0, paddr + 0x10200); mpic_assign_isu(mpic, 1, paddr + 0x11000); mpic_init(mpic); } static void storcenter_restart(char *cmd) { local_irq_disable(); /* Set exception prefix high - to the firmware */ _nmask_and_or_msr(0, MSR_IP); /* Wait for reset to happen */ for (;;) ; } static int __init storcenter_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "iomega,storcenter"); } define_machine(storcenter){ .name = "IOMEGA StorCenter", .probe = storcenter_probe, .setup_arch = storcenter_setup_arch, .init_IRQ = storcenter_init_IRQ, .get_irq = mpic_get_irq, .restart = storcenter_restart, .calibrate_decr = generic_calibrate_decr, };
gpl-2.0
hallovveen31/M8_JUST_ONE_KERNEL
kernel/time/tick-oneshot.c
7826
2892
/* * linux/kernel/time/tick-oneshot.c * * This file contains functions which manage high resolution tick * related events. * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner * * This code is licenced under the GPL version 2. For details see * kernel-base/COPYING. */ #include <linux/cpu.h> #include <linux/err.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/profile.h> #include <linux/sched.h> #include "tick-internal.h" /** * tick_program_event */ int tick_program_event(ktime_t expires, int force) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); return clockevents_program_event(dev, expires, force); } /** * tick_resume_onshot - resume oneshot mode */ void tick_resume_oneshot(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); clockevents_program_event(dev, ktime_get(), true); } /** * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz) */ void tick_setup_oneshot(struct clock_event_device *newdev, void (*handler)(struct clock_event_device *), ktime_t next_event) { newdev->event_handler = handler; clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); clockevents_program_event(newdev, next_event, true); } /** * tick_switch_to_oneshot - switch to oneshot mode */ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) { struct tick_device *td = &__get_cpu_var(tick_cpu_device); struct clock_event_device *dev = td->evtdev; if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || !tick_device_is_functional(dev)) { printk(KERN_INFO "Clockevents: " "could not switch to one-shot mode:"); if (!dev) { printk(" no tick device\n"); } else { if (!tick_device_is_functional(dev)) printk(" %s is not functional.\n", dev->name); else printk(" %s does not support 
one-shot mode.\n", dev->name); } return -EINVAL; } td->mode = TICKDEV_MODE_ONESHOT; dev->event_handler = handler; clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); tick_broadcast_switch_to_oneshot(); return 0; } /** * tick_check_oneshot_mode - check whether the system is in oneshot mode * * returns 1 when either nohz or highres are enabled. otherwise 0. */ int tick_oneshot_mode_active(void) { unsigned long flags; int ret; local_irq_save(flags); ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT; local_irq_restore(flags); return ret; } #ifdef CONFIG_HIGH_RES_TIMERS /** * tick_init_highres - switch to high resolution mode * * Called with interrupts disabled. */ int tick_init_highres(void) { return tick_switch_to_oneshot(hrtimer_interrupt); } #endif
gpl-2.0
chirayudesai/linux-msm-fusion3
sound/drivers/pcsp/pcsp_mixer.c
9362
4117
/* * PC-Speaker driver for Linux * * Mixer implementation. * Copyright (C) 2001-2008 Stas Sergeev */ #include <sound/core.h> #include <sound/control.h> #include "pcsp.h" static int pcsp_enable_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int pcsp_enable_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->enable; return 0; } static int pcsp_enable_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int enab = ucontrol->value.integer.value[0]; if (enab != chip->enable) { chip->enable = enab; changed = 1; } return changed; } static int pcsp_treble_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = chip->max_treble + 1; if (uinfo->value.enumerated.item > chip->max_treble) uinfo->value.enumerated.item = chip->max_treble; sprintf(uinfo->value.enumerated.name, "%lu", (unsigned long)PCSP_CALC_RATE(uinfo->value.enumerated.item)); return 0; } static int pcsp_treble_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = chip->treble; return 0; } static int pcsp_treble_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int treble = ucontrol->value.enumerated.item[0]; if (treble != chip->treble) { chip->treble = treble; #if PCSP_DEBUG printk(KERN_INFO "PCSP: rate set to %li\n", PCSP_RATE()); #endif changed = 1; } return changed; } static int 
pcsp_pcspkr_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int pcsp_pcspkr_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->pcspkr; return 0; } static int pcsp_pcspkr_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol); int changed = 0; int spkr = ucontrol->value.integer.value[0]; if (spkr != chip->pcspkr) { chip->pcspkr = spkr; changed = 1; } return changed; } #define PCSP_MIXER_CONTROL(ctl_type, ctl_name) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = ctl_name, \ .info = pcsp_##ctl_type##_info, \ .get = pcsp_##ctl_type##_get, \ .put = pcsp_##ctl_type##_put, \ } static struct snd_kcontrol_new __devinitdata snd_pcsp_controls_pcm[] = { PCSP_MIXER_CONTROL(enable, "Master Playback Switch"), PCSP_MIXER_CONTROL(treble, "BaseFRQ Playback Volume"), }; static struct snd_kcontrol_new __devinitdata snd_pcsp_controls_spkr[] = { PCSP_MIXER_CONTROL(pcspkr, "Beep Playback Switch"), }; static int __devinit snd_pcsp_ctls_add(struct snd_pcsp *chip, struct snd_kcontrol_new *ctls, int num) { int i, err; struct snd_card *card = chip->card; for (i = 0; i < num; i++) { err = snd_ctl_add(card, snd_ctl_new1(ctls + i, chip)); if (err < 0) return err; } return 0; } int __devinit snd_pcsp_new_mixer(struct snd_pcsp *chip, int nopcm) { int err; struct snd_card *card = chip->card; if (!nopcm) { err = snd_pcsp_ctls_add(chip, snd_pcsp_controls_pcm, ARRAY_SIZE(snd_pcsp_controls_pcm)); if (err < 0) return err; } err = snd_pcsp_ctls_add(chip, snd_pcsp_controls_spkr, ARRAY_SIZE(snd_pcsp_controls_spkr)); if (err < 0) return err; strcpy(card->mixername, "PC-Speaker"); return 0; }
gpl-2.0
ali-filth/android_kernel_samsung_msm8226
drivers/scsi/NCR53c406a.c
11154
28738
/* * NCR53c406.c * Low-level SCSI driver for NCR53c406a chip. * Copyright (C) 1994, 1995, 1996 Normunds Saumanis (normunds@fi.ibm.com) * * LILO command line usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]] * Specify IRQ = 0 for non-interrupt driven mode. * FASTPIO = 1 for fast pio mode, 0 for slow mode. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * */ #define NCR53C406A_DEBUG 0 #define VERBOSE_NCR53C406A_DEBUG 0 /* Set this to 1 for PIO mode (recommended) or to 0 for DMA mode */ #define USE_PIO 1 #define USE_BIOS 0 /* #define BIOS_ADDR 0xD8000 *//* define this if autoprobe fails */ /* #define PORT_BASE 0x330 *//* define this if autoprobe fails */ /* #define IRQ_LEV 0 *//* define this if autoprobe fails */ #define DMA_CHAN 5 /* this is ignored if DMA is disabled */ /* Set this to 0 if you encounter kernel lockups while transferring * data in PIO mode */ #define USE_FAST_PIO 1 /* ============= End of user configurable parameters ============= */ #include <linux/module.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/irq.h> #include <linux/blkdev.h> #include <linux/spinlock.h> #include "scsi.h" #include <scsi/scsi_host.h> /* ============================================================= */ #define WATCHDOG 5000000 #define SYNC_MODE 0 /* Synchronous transfer mode */ #ifdef DEBUG #undef NCR53C406A_DEBUG #define NCR53C406A_DEBUG 1 #endif #if USE_PIO #define 
USE_DMA 0 #else #define USE_DMA 1 #endif /* Default configuration */ #define C1_IMG 0x07 /* ID=7 */ #define C2_IMG 0x48 /* FE SCSI2 */ #if USE_DMA #define C3_IMG 0x21 /* CDB TE */ #else #define C3_IMG 0x20 /* CDB */ #endif #define C4_IMG 0x04 /* ANE */ #define C5_IMG 0xb6 /* AA PI SIE POL */ #define REG0 (outb(C4_IMG, CONFIG4)) #define REG1 (outb(C5_IMG, CONFIG5)) #if NCR53C406A_DEBUG #define DEB(x) x #else #define DEB(x) #endif #if VERBOSE_NCR53C406A_DEBUG #define VDEB(x) x #else #define VDEB(x) #endif #define LOAD_DMA_COUNT(count) \ outb(count & 0xff, TC_LSB); \ outb((count >> 8) & 0xff, TC_MSB); \ outb((count >> 16) & 0xff, TC_HIGH); /* Chip commands */ #define DMA_OP 0x80 #define SCSI_NOP 0x00 #define FLUSH_FIFO 0x01 #define CHIP_RESET 0x02 #define SCSI_RESET 0x03 #define RESELECT 0x40 #define SELECT_NO_ATN 0x41 #define SELECT_ATN 0x42 #define SELECT_ATN_STOP 0x43 #define ENABLE_SEL 0x44 #define DISABLE_SEL 0x45 #define SELECT_ATN3 0x46 #define RESELECT3 0x47 #define TRANSFER_INFO 0x10 #define INIT_CMD_COMPLETE 0x11 #define MSG_ACCEPT 0x12 #define TRANSFER_PAD 0x18 #define SET_ATN 0x1a #define RESET_ATN 0x1b #define SEND_MSG 0x20 #define SEND_STATUS 0x21 #define SEND_DATA 0x22 #define DISCONN_SEQ 0x23 #define TERMINATE_SEQ 0x24 #define TARG_CMD_COMPLETE 0x25 #define DISCONN 0x27 #define RECV_MSG 0x28 #define RECV_CMD 0x29 #define RECV_DATA 0x2a #define RECV_CMD_SEQ 0x2b #define TARGET_ABORT_DMA 0x04 /*----------------------------------------------------------------*/ /* the following will set the monitor border color (useful to find where something crashed or gets stuck at */ /* 1 = blue 2 = green 3 = cyan 4 = red 5 = magenta 6 = yellow 7 = white */ #if NCR53C406A_DEBUG #define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);} #else #define rtrc(i) {} #endif /*----------------------------------------------------------------*/ enum Phase { idle, data_out, data_in, command_ph, status_ph, message_out, message_in }; /* Static function prototypes */ static void 
NCR53c406a_intr(void *); static irqreturn_t do_NCR53c406a_intr(int, void *); static void chip_init(void); static void calc_port_addr(void); #ifndef IRQ_LEV static int irq_probe(void); #endif /* ================================================================= */ #if USE_BIOS static void *bios_base; #endif #ifdef PORT_BASE static int port_base = PORT_BASE; #else static int port_base; #endif #ifdef IRQ_LEV static int irq_level = IRQ_LEV; #else static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized' */ #endif #if USE_DMA static int dma_chan; #endif #if USE_PIO static int fast_pio = USE_FAST_PIO; #endif static Scsi_Cmnd *current_SC; static char info_msg[256]; /* ================================================================= */ /* possible BIOS locations */ #if USE_BIOS static void *addresses[] = { (void *) 0xd8000, (void *) 0xc8000 }; #define ADDRESS_COUNT ARRAY_SIZE(addresses) #endif /* USE_BIOS */ /* possible i/o port addresses */ static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 }; #define PORT_COUNT ARRAY_SIZE(ports) #ifndef MODULE /* possible interrupt channels */ static unsigned short intrs[] = { 10, 11, 12, 15 }; #define INTR_COUNT ARRAY_SIZE(intrs) #endif /* !MODULE */ /* signatures for NCR 53c406a based controllers */ #if USE_BIOS struct signature { char *signature; int sig_offset; int sig_length; } signatures[] __initdata = { /* 1 2 3 4 5 6 */ /* 123456789012345678901234567890123456789012345678901234567890 */ { "Copyright (C) Acculogic, Inc.\r\n2.8M Diskette Extension Bios ver 4.04.03 03/01/1993", 61, 82},}; #define SIGNATURE_COUNT ARRAY_SIZE(signatures) #endif /* USE_BIOS */ /* ============================================================ */ /* Control Register Set 0 */ static int TC_LSB; /* transfer counter lsb */ static int TC_MSB; /* transfer counter msb */ static int SCSI_FIFO; /* scsi fifo register */ static int CMD_REG; /* command register */ static int STAT_REG; /* status 
register */ static int DEST_ID; /* selection/reselection bus id */ static int INT_REG; /* interrupt status register */ static int SRTIMOUT; /* select/reselect timeout reg */ static int SEQ_REG; /* sequence step register */ static int SYNCPRD; /* synchronous transfer period */ static int FIFO_FLAGS; /* indicates # of bytes in fifo */ static int SYNCOFF; /* synchronous offset register */ static int CONFIG1; /* configuration register */ static int CLKCONV; /* clock conversion reg */ /*static int TESTREG;*//* test mode register */ static int CONFIG2; /* Configuration 2 Register */ static int CONFIG3; /* Configuration 3 Register */ static int CONFIG4; /* Configuration 4 Register */ static int TC_HIGH; /* Transfer Counter High */ /*static int FIFO_BOTTOM;*//* Reserve FIFO byte register */ /* Control Register Set 1 */ /*static int JUMPER_SENSE;*//* Jumper sense port reg (r/w) */ /*static int SRAM_PTR;*//* SRAM address pointer reg (r/w) */ /*static int SRAM_DATA;*//* SRAM data register (r/w) */ static int PIO_FIFO; /* PIO FIFO registers (r/w) */ /*static int PIO_FIFO1;*//* */ /*static int PIO_FIFO2;*//* */ /*static int PIO_FIFO3;*//* */ static int PIO_STATUS; /* PIO status (r/w) */ /*static int ATA_CMD;*//* ATA command/status reg (r/w) */ /*static int ATA_ERR;*//* ATA features/error register (r/w) */ static int PIO_FLAG; /* PIO flag interrupt enable (r/w) */ static int CONFIG5; /* Configuration 5 register (r/w) */ /*static int SIGNATURE;*//* Signature Register (r) */ /*static int CONFIG6;*//* Configuration 6 register (r) */ /* ============================================================== */ #if USE_DMA static __inline__ int NCR53c406a_dma_setup(unsigned char *ptr, unsigned int count, unsigned char mode) { unsigned limit; unsigned long flags = 0; VDEB(printk("dma: before count=%d ", count)); if (dma_chan <= 3) { if (count > 65536) count = 65536; limit = 65536 - (((unsigned) ptr) & 0xFFFF); } else { if (count > (65536 << 1)) count = (65536 << 1); limit = (65536 << 1) - 
(((unsigned) ptr) & 0x1FFFF); } if (count > limit) count = limit; VDEB(printk("after count=%d\n", count)); if ((count & 1) || (((unsigned) ptr) & 1)) panic("NCR53c406a: attempted unaligned DMA transfer\n"); flags = claim_dma_lock(); disable_dma(dma_chan); clear_dma_ff(dma_chan); set_dma_addr(dma_chan, (long) ptr); set_dma_count(dma_chan, count); set_dma_mode(dma_chan, mode); enable_dma(dma_chan); release_dma_lock(flags); return count; } static __inline__ int NCR53c406a_dma_write(unsigned char *src, unsigned int count) { return NCR53c406a_dma_setup(src, count, DMA_MODE_WRITE); } static __inline__ int NCR53c406a_dma_read(unsigned char *src, unsigned int count) { return NCR53c406a_dma_setup(src, count, DMA_MODE_READ); } static __inline__ int NCR53c406a_dma_residual(void) { register int tmp; unsigned long flags; flags = claim_dma_lock(); clear_dma_ff(dma_chan); tmp = get_dma_residue(dma_chan); release_dma_lock(flags); return tmp; } #endif /* USE_DMA */ #if USE_PIO static __inline__ int NCR53c406a_pio_read(unsigned char *request, unsigned int reqlen) { int i; int len; /* current scsi fifo size */ REG1; while (reqlen) { i = inb(PIO_STATUS); /* VDEB(printk("pio_status=%x\n", i)); */ if (i & 0x80) return 0; switch (i & 0x1e) { default: case 0x10: len = 0; break; case 0x0: len = 1; break; case 0x8: len = 42; break; case 0xc: len = 84; break; case 0xe: len = 128; break; } if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */ return 0; } if (len) { if (len > reqlen) len = reqlen; if (fast_pio && len > 3) { insl(PIO_FIFO, request, len >> 2); request += len & 0xfc; reqlen -= len & 0xfc; } else { while (len--) { *request++ = inb(PIO_FIFO); reqlen--; } } } } return 0; } static __inline__ int NCR53c406a_pio_write(unsigned char *request, unsigned int reqlen) { int i = 0; int len; /* current scsi fifo size */ REG1; while (reqlen && !(i & 0x40)) { i = inb(PIO_STATUS); /* VDEB(printk("pio_status=%x\n", i)); */ if (i & 0x80) /* error */ return 0; switch (i & 0x1e) { 
case 0x10: len = 128; break; case 0x0: len = 84; break; case 0x8: len = 42; break; case 0xc: len = 1; break; default: case 0xe: len = 0; break; } if (len) { if (len > reqlen) len = reqlen; if (fast_pio && len > 3) { outsl(PIO_FIFO, request, len >> 2); request += len & 0xfc; reqlen -= len & 0xfc; } else { while (len--) { outb(*request++, PIO_FIFO); reqlen--; } } } } return 0; } #endif /* USE_PIO */ static int __init NCR53c406a_detect(struct scsi_host_template * tpnt) { int present = 0; struct Scsi_Host *shpnt = NULL; #ifndef PORT_BASE int i; #endif #if USE_BIOS int ii, jj; bios_base = 0; /* look for a valid signature */ for (ii = 0; ii < ADDRESS_COUNT && !bios_base; ii++) for (jj = 0; (jj < SIGNATURE_COUNT) && !bios_base; jj++) if (!memcmp((void *) addresses[ii] + signatures[jj].sig_offset, (void *) signatures[jj].signature, (int) signatures[jj].sig_length)) bios_base = addresses[ii]; if (!bios_base) { printk("NCR53c406a: BIOS signature not found\n"); return 0; } DEB(printk("NCR53c406a BIOS found at 0x%x\n", (unsigned int) bios_base); ); #endif /* USE_BIOS */ #ifdef PORT_BASE if (!request_region(port_base, 0x10, "NCR53c406a")) /* ports already snatched */ port_base = 0; #else /* autodetect */ if (port_base) { /* LILO override */ if (!request_region(port_base, 0x10, "NCR53c406a")) port_base = 0; } else { for (i = 0; i < PORT_COUNT && !port_base; i++) { if (!request_region(ports[i], 0x10, "NCR53c406a")) { DEB(printk("NCR53c406a: port 0x%x in use\n", ports[i])); } else { VDEB(printk("NCR53c406a: port 0x%x available\n", ports[i])); outb(C5_IMG, ports[i] + 0x0d); /* reg set 1 */ if ((inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7 && (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7 && (inb(ports[i] + 0x0e) & 0xf8) == 0x58) { port_base = ports[i]; VDEB(printk("NCR53c406a: Sig register valid\n")); VDEB(printk("port_base=0x%x\n", port_base)); break; } release_region(ports[i], 0x10); } } } #endif /* PORT_BASE */ if (!port_base) { /* no ports found */ printk("NCR53c406a: 
no available ports found\n"); return 0; } DEB(printk("NCR53c406a detected\n")); calc_port_addr(); chip_init(); #ifndef IRQ_LEV if (irq_level < 0) { /* LILO override if >= 0 */ irq_level = irq_probe(); if (irq_level < 0) { /* Trouble */ printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level); goto err_release; } } #endif DEB(printk("NCR53c406a: using port_base 0x%x\n", port_base)); present = 1; tpnt->proc_name = "NCR53c406a"; shpnt = scsi_register(tpnt, 0); if (!shpnt) { printk("NCR53c406a: Unable to register host, giving up.\n"); goto err_release; } if (irq_level > 0) { if (request_irq(irq_level, do_NCR53c406a_intr, 0, "NCR53c406a", shpnt)) { printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level); goto err_free_scsi; } tpnt->can_queue = 1; DEB(printk("NCR53c406a: allocated IRQ %d\n", irq_level)); } else if (irq_level == 0) { tpnt->can_queue = 0; DEB(printk("NCR53c406a: No interrupts detected\n")); printk("NCR53c406a driver no longer supports polling interface\n"); printk("Please email linux-scsi@vger.kernel.org\n"); #if USE_DMA printk("NCR53c406a: No interrupts found and DMA mode defined. Giving up.\n"); #endif /* USE_DMA */ goto err_free_scsi; } else { DEB(printk("NCR53c406a: Shouldn't get here!\n")); goto err_free_scsi; } #if USE_DMA dma_chan = DMA_CHAN; if (request_dma(dma_chan, "NCR53c406a") != 0) { printk("NCR53c406a: unable to allocate DMA channel %d\n", dma_chan); goto err_free_irq; } DEB(printk("Allocated DMA channel %d\n", dma_chan)); #endif /* USE_DMA */ shpnt->irq = irq_level; shpnt->io_port = port_base; shpnt->n_io_port = 0x10; #if USE_DMA shpnt->dma = dma_chan; #endif #if USE_DMA sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, DMA channel %d.", port_base, irq_level, dma_chan); #else sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, %s PIO mode.", port_base, irq_level, fast_pio ? 
"fast" : "slow"); #endif return (present); #if USE_DMA err_free_irq: if (irq_level) free_irq(irq_level, shpnt); #endif err_free_scsi: scsi_unregister(shpnt); err_release: release_region(port_base, 0x10); return 0; } static int NCR53c406a_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, NULL); #ifdef USE_DMA if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); #endif if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); return 0; } #ifndef MODULE /* called from init/main.c */ static int __init NCR53c406a_setup(char *str) { static size_t setup_idx = 0; size_t i; int ints[4]; DEB(printk("NCR53c406a: Setup called\n"); ); if (setup_idx >= PORT_COUNT - 1) { printk("NCR53c406a: Setup called too many times. Bad LILO params?\n"); return 0; } get_options(str, 4, ints); if (ints[0] < 1 || ints[0] > 3) { printk("NCR53c406a: Malformed command line\n"); printk("NCR53c406a: Usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]\n"); return 0; } for (i = 0; i < PORT_COUNT && !port_base; i++) if (ports[i] == ints[1]) { port_base = ints[1]; DEB(printk("NCR53c406a: Specified port_base 0x%x\n", port_base); ) } if (!port_base) { printk("NCR53c406a: Invalid PORTBASE 0x%x specified\n", ints[1]); return 0; } if (ints[0] > 1) { if (ints[2] == 0) { irq_level = 0; DEB(printk("NCR53c406a: Specified irq %d\n", irq_level); ) } else for (i = 0; i < INTR_COUNT && irq_level < 0; i++) if (intrs[i] == ints[2]) { irq_level = ints[2]; DEB(printk("NCR53c406a: Specified irq %d\n", port_base); ) } if (irq_level < 0) printk("NCR53c406a: Invalid IRQ %d specified\n", ints[2]); } if (ints[0] > 2) fast_pio = ints[3]; DEB(printk("NCR53c406a: port_base=0x%x, irq=%d, fast_pio=%d\n", port_base, irq_level, fast_pio);) return 1; } __setup("ncr53c406a=", NCR53c406a_setup); #endif /* !MODULE */ static const char *NCR53c406a_info(struct Scsi_Host *SChost) { DEB(printk("NCR53c406a_info called\n")); return (info_msg); } #if 0 static 
void wait_intr(void) { unsigned long i = jiffies + WATCHDOG; while (time_after(i, jiffies) && !(inb(STAT_REG) & 0xe0)) { /* wait for a pseudo-interrupt */ cpu_relax(); barrier(); } if (time_before_eq(i, jiffies)) { /* Timed out */ rtrc(0); current_SC->result = DID_TIME_OUT << 16; current_SC->SCp.phase = idle; current_SC->scsi_done(current_SC); return; } NCR53c406a_intr(NULL); } #endif static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) { int i; VDEB(printk("NCR53c406a_queue called\n")); DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt))); #if 0 VDEB(for (i = 0; i < SCpnt->cmd_len; i++) printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); VDEB(printk("\n")); #endif current_SC = SCpnt; current_SC->scsi_done = done; current_SC->SCp.phase = command_ph; current_SC->SCp.Status = 0; current_SC->SCp.Message = 0; /* We are locked here already by the mid layer */ REG0; outb(scmd_id(SCpnt), DEST_ID); /* set destination */ outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */ for (i = 0; i < SCpnt->cmd_len; i++) { outb(SCpnt->cmnd[i], SCSI_FIFO); } outb(SELECT_NO_ATN, CMD_REG); rtrc(1); return 0; } static DEF_SCSI_QCMD(NCR53c406a_queue) static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt) { DEB(printk("NCR53c406a_reset called\n")); spin_lock_irq(SCpnt->device->host->host_lock); outb(C4_IMG, CONFIG4); /* Select reg set 0 */ outb(CHIP_RESET, CMD_REG); outb(SCSI_NOP, CMD_REG); /* required after reset */ outb(SCSI_RESET, CMD_REG); chip_init(); rtrc(2); spin_unlock_irq(SCpnt->device->host->host_lock); return SUCCESS; } static int NCR53c406a_biosparm(struct scsi_device *disk, struct block_device *dev, sector_t capacity, int *info_array) { int size; DEB(printk("NCR53c406a_biosparm called\n")); size = capacity; info_array[0] = 64; /* heads */ info_array[1] = 32; /* sectors */ info_array[2] = size >> 11; /* cylinders */ if (info_array[2] > 1024) { /* big disk */ 
info_array[0] = 255; info_array[1] = 63; info_array[2] = size / (255 * 63); } return 0; } static irqreturn_t do_NCR53c406a_intr(int unused, void *dev_id) { unsigned long flags; struct Scsi_Host *dev = dev_id; spin_lock_irqsave(dev->host_lock, flags); NCR53c406a_intr(dev_id); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void NCR53c406a_intr(void *dev_id) { DEB(unsigned char fifo_size; ) DEB(unsigned char seq_reg; ) unsigned char status, int_reg; #if USE_PIO unsigned char pio_status; struct scatterlist *sg; int i; #endif VDEB(printk("NCR53c406a_intr called\n")); #if USE_PIO REG1; pio_status = inb(PIO_STATUS); #endif REG0; status = inb(STAT_REG); DEB(seq_reg = inb(SEQ_REG)); int_reg = inb(INT_REG); DEB(fifo_size = inb(FIFO_FLAGS) & 0x1f); #if NCR53C406A_DEBUG printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", status, seq_reg, int_reg, fifo_size); #if (USE_DMA) printk("\n"); #else printk(", pio=%02x\n", pio_status); #endif /* USE_DMA */ #endif /* NCR53C406A_DEBUG */ if (int_reg & 0x80) { /* SCSI reset intr */ rtrc(3); DEB(printk("NCR53c406a: reset intr received\n")); current_SC->SCp.phase = idle; current_SC->result = DID_RESET << 16; current_SC->scsi_done(current_SC); return; } #if USE_PIO if (pio_status & 0x80) { printk("NCR53C406A: Warning: PIO error!\n"); current_SC->SCp.phase = idle; current_SC->result = DID_ERROR << 16; current_SC->scsi_done(current_SC); return; } #endif /* USE_PIO */ if (status & 0x20) { /* Parity error */ printk("NCR53c406a: Warning: parity error!\n"); current_SC->SCp.phase = idle; current_SC->result = DID_PARITY << 16; current_SC->scsi_done(current_SC); return; } if (status & 0x40) { /* Gross error */ printk("NCR53c406a: Warning: gross error!\n"); current_SC->SCp.phase = idle; current_SC->result = DID_ERROR << 16; current_SC->scsi_done(current_SC); return; } if (int_reg & 0x20) { /* Disconnect */ DEB(printk("NCR53c406a: disconnect intr received\n")); if (current_SC->SCp.phase != message_in) { /* 
Unexpected disconnect */ current_SC->result = DID_NO_CONNECT << 16; } else { /* Command complete, return status and message */ current_SC->result = (current_SC->SCp.Status & 0xff) | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16); } rtrc(0); current_SC->SCp.phase = idle; current_SC->scsi_done(current_SC); return; } switch (status & 0x07) { /* scsi phase */ case 0x00: /* DATA-OUT */ if (int_reg & 0x10) { /* Target requesting info transfer */ rtrc(5); current_SC->SCp.phase = data_out; VDEB(printk("NCR53c406a: Data-Out phase\n")); outb(FLUSH_FIFO, CMD_REG); LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */ #if USE_DMA /* No s/g support for DMA */ NCR53c406a_dma_write(scsi_sglist(current_SC), scsdi_bufflen(current_SC)); #endif /* USE_DMA */ outb(TRANSFER_INFO | DMA_OP, CMD_REG); #if USE_PIO scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) { NCR53c406a_pio_write(sg_virt(sg), sg->length); } REG0; #endif /* USE_PIO */ } break; case 0x01: /* DATA-IN */ if (int_reg & 0x10) { /* Target requesting info transfer */ rtrc(6); current_SC->SCp.phase = data_in; VDEB(printk("NCR53c406a: Data-In phase\n")); outb(FLUSH_FIFO, CMD_REG); LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */ #if USE_DMA /* No s/g support for DMA */ NCR53c406a_dma_read(scsi_sglist(current_SC), scsdi_bufflen(current_SC)); #endif /* USE_DMA */ outb(TRANSFER_INFO | DMA_OP, CMD_REG); #if USE_PIO scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) { NCR53c406a_pio_read(sg_virt(sg), sg->length); } REG0; #endif /* USE_PIO */ } break; case 0x02: /* COMMAND */ current_SC->SCp.phase = command_ph; printk("NCR53c406a: Warning: Unknown interrupt occurred in command phase!\n"); break; case 0x03: /* STATUS */ rtrc(7); current_SC->SCp.phase = status_ph; VDEB(printk("NCR53c406a: Status phase\n")); outb(FLUSH_FIFO, CMD_REG); outb(INIT_CMD_COMPLETE, CMD_REG); break; case 0x04: /* Reserved */ case 0x05: /* Reserved */ printk("NCR53c406a: WARNING: Reserved 
phase!!!\n"); break; case 0x06: /* MESSAGE-OUT */ DEB(printk("NCR53c406a: Message-Out phase\n")); current_SC->SCp.phase = message_out; outb(SET_ATN, CMD_REG); /* Reject the message */ outb(MSG_ACCEPT, CMD_REG); break; case 0x07: /* MESSAGE-IN */ rtrc(4); VDEB(printk("NCR53c406a: Message-In phase\n")); current_SC->SCp.phase = message_in; current_SC->SCp.Status = inb(SCSI_FIFO); current_SC->SCp.Message = inb(SCSI_FIFO); VDEB(printk("SCSI FIFO size=%d\n", inb(FIFO_FLAGS) & 0x1f)); DEB(printk("Status = %02x Message = %02x\n", current_SC->SCp.Status, current_SC->SCp.Message)); if (current_SC->SCp.Message == SAVE_POINTERS || current_SC->SCp.Message == DISCONNECT) { outb(SET_ATN, CMD_REG); /* Reject message */ DEB(printk("Discarding SAVE_POINTERS message\n")); } outb(MSG_ACCEPT, CMD_REG); break; } } #ifndef IRQ_LEV static int irq_probe(void) { int irqs, irq; unsigned long i; inb(INT_REG); /* clear the interrupt register */ irqs = probe_irq_on(); /* Invalid command will cause an interrupt */ REG0; outb(0xff, CMD_REG); /* Wait for the interrupt to occur */ i = jiffies + WATCHDOG; while (time_after(i, jiffies) && !(inb(STAT_REG) & 0x80)) barrier(); if (time_before_eq(i, jiffies)) { /* Timed out, must be hardware trouble */ probe_irq_off(irqs); return -1; } irq = probe_irq_off(irqs); /* Kick the chip */ outb(CHIP_RESET, CMD_REG); outb(SCSI_NOP, CMD_REG); chip_init(); return irq; } #endif /* IRQ_LEV */ static void chip_init(void) { REG1; #if USE_DMA outb(0x00, PIO_STATUS); #else /* USE_PIO */ outb(0x01, PIO_STATUS); #endif outb(0x00, PIO_FLAG); outb(C4_IMG, CONFIG4); /* REG0; */ outb(C3_IMG, CONFIG3); outb(C2_IMG, CONFIG2); outb(C1_IMG, CONFIG1); outb(0x05, CLKCONV); /* clock conversion factor */ outb(0x9C, SRTIMOUT); /* Selection timeout */ outb(0x05, SYNCPRD); /* Synchronous transfer period */ outb(SYNC_MODE, SYNCOFF); /* synchronous mode */ } static void __init calc_port_addr(void) { /* Control Register Set 0 */ TC_LSB = (port_base + 0x00); TC_MSB = (port_base + 0x01); 
SCSI_FIFO = (port_base + 0x02); CMD_REG = (port_base + 0x03); STAT_REG = (port_base + 0x04); DEST_ID = (port_base + 0x04); INT_REG = (port_base + 0x05); SRTIMOUT = (port_base + 0x05); SEQ_REG = (port_base + 0x06); SYNCPRD = (port_base + 0x06); FIFO_FLAGS = (port_base + 0x07); SYNCOFF = (port_base + 0x07); CONFIG1 = (port_base + 0x08); CLKCONV = (port_base + 0x09); /* TESTREG = (port_base+0x0A); */ CONFIG2 = (port_base + 0x0B); CONFIG3 = (port_base + 0x0C); CONFIG4 = (port_base + 0x0D); TC_HIGH = (port_base + 0x0E); /* FIFO_BOTTOM = (port_base+0x0F); */ /* Control Register Set 1 */ /* JUMPER_SENSE = (port_base+0x00); */ /* SRAM_PTR = (port_base+0x01); */ /* SRAM_DATA = (port_base+0x02); */ PIO_FIFO = (port_base + 0x04); /* PIO_FIFO1 = (port_base+0x05); */ /* PIO_FIFO2 = (port_base+0x06); */ /* PIO_FIFO3 = (port_base+0x07); */ PIO_STATUS = (port_base + 0x08); /* ATA_CMD = (port_base+0x09); */ /* ATA_ERR = (port_base+0x0A); */ PIO_FLAG = (port_base + 0x0B); CONFIG5 = (port_base + 0x0D); /* SIGNATURE = (port_base+0x0E); */ /* CONFIG6 = (port_base+0x0F); */ } MODULE_LICENSE("GPL"); /* NOTE: scatter-gather support only works in PIO mode. * Use SG_NONE if DMA mode is enabled! */ static struct scsi_host_template driver_template = { .proc_name = "NCR53c406a" /* proc_name */, .name = "NCR53c406a" /* name */, .detect = NCR53c406a_detect /* detect */, .release = NCR53c406a_release, .info = NCR53c406a_info /* info */, .queuecommand = NCR53c406a_queue /* queuecommand */, .eh_host_reset_handler = NCR53c406a_host_reset /* reset */, .bios_param = NCR53c406a_biosparm /* biosparm */, .can_queue = 1 /* can_queue */, .this_id = 7 /* SCSI ID of the chip */, .sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/, .cmd_per_lun = 1 /* commands per lun */, .unchecked_isa_dma = 1 /* unchecked_isa_dma */, .use_clustering = ENABLE_CLUSTERING, }; #include "scsi_module.c" /* * Overrides for Emacs so that we get a uniform tabbing style. 
* Emacs will notice this stuff at the end of the file and automatically * adjust the settings for this buffer only. This must remain at the end * of the file. * --------------------------------------------------------------------------- * Local variables: * c-indent-level: 4 * c-brace-imaginary-offset: 0 * c-brace-offset: -4 * c-argdecl-indent: 4 * c-label-offset: -4 * c-continued-statement-offset: 4 * c-continued-brace-offset: 0 * indent-tabs-mode: nil * tab-width: 8 * End: */
gpl-2.0
existz/htc-kernel-msm7x30
drivers/media/video/pwc/pwc-timon.c
14994
67609
/* Linux driver for Philips webcam (C) 2004-2006 Luc Saillard (luc@saillard.org) NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This tables contains entries for the 675/680/690 (Timon) camera, with 4 different qualities (no compression, low, medium, high). It lists the bandwidth requirements for said mode by its alternate interface number. An alternate of 0 means that the mode is unavailable. There are 6 * 4 * 4 entries: 6 different resolutions subqcif, qsif, qcif, sif, cif, vga 6 framerates: 5, 10, 15, 20, 25, 30 4 compression modi: none, low, medium, high When an uncompressed mode is not available, the next available compressed mode will be chosen (unless the decompressor is absent). Sometimes there are only 1 or 2 compressed modes available; in that case entries are duplicated. 
*/ #include "pwc-timon.h" const unsigned int Timon_fps_vector[PWC_FPS_MAX_TIMON] = { 5, 10, 15, 20, 25, 30 }; const struct Timon_table_entry Timon_table[PSZ_MAX][PWC_FPS_MAX_TIMON][4] = { /* SQCIF */ { /* 5 fps */ { {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}}, {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}}, {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}}, {1, 140, 0, {0x05, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x8C, 0xFC, 0x80, 0x02}}, }, /* 10 fps */ { {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}}, {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}}, {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}}, {2, 280, 0, {0x04, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x18, 0xA9, 0x80, 0x02}}, }, /* 15 fps */ { {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}}, {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}}, {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}}, {3, 410, 0, {0x03, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x9A, 0x71, 0x80, 0x02}}, }, /* 20 fps */ { {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}}, {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}}, {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}}, {4, 559, 0, {0x02, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x2F, 0x56, 0x80, 0x02}}, }, /* 25 fps */ { {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}}, {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}}, {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 
0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}}, {5, 659, 0, {0x01, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x93, 0x46, 0x80, 0x02}}, }, /* 30 fps */ { {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}}, {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}}, {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}}, {7, 838, 0, {0x00, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x46, 0x3B, 0x80, 0x02}}, }, }, /* QSIF */ { /* 5 fps */ { {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}}, {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}}, {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}}, {1, 146, 0, {0x2D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x92, 0xFC, 0xC0, 0x02}}, }, /* 10 fps */ { {2, 291, 0, {0x2C, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x23, 0xA1, 0xC0, 0x02}}, {1, 191, 630, {0x2C, 0xF4, 0x05, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xBF, 0xF4, 0xC0, 0x02}}, {1, 191, 630, {0x2C, 0xF4, 0x05, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xBF, 0xF4, 0xC0, 0x02}}, {1, 191, 630, {0x2C, 0xF4, 0x05, 0x13, 0xA9, 0x12, 0xE1, 0x17, 0x08, 0xBF, 0xF4, 0xC0, 0x02}}, }, /* 15 fps */ { {3, 437, 0, {0x2B, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xB5, 0x6D, 0xC0, 0x02}}, {2, 291, 640, {0x2B, 0xF4, 0x05, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x08, 0x23, 0xA1, 0xC0, 0x02}}, {2, 291, 640, {0x2B, 0xF4, 0x05, 0x13, 0xF7, 0x13, 0x2F, 0x13, 0x08, 0x23, 0xA1, 0xC0, 0x02}}, {1, 191, 420, {0x2B, 0xF4, 0x0D, 0x0D, 0x1B, 0x0C, 0x53, 0x1E, 0x08, 0xBF, 0xF4, 0xC0, 0x02}}, }, /* 20 fps */ { {4, 588, 0, {0x2A, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x4C, 0x52, 0xC0, 0x02}}, {3, 447, 730, {0x2A, 0xF4, 0x05, 0x16, 0xC9, 0x16, 0x01, 0x0E, 0x18, 0xBF, 0x69, 0xC0, 0x02}}, {2, 292, 476, {0x2A, 0xF4, 0x0D, 0x0E, 0xD8, 0x0E, 0x10, 
0x19, 0x18, 0x24, 0xA1, 0xC0, 0x02}}, {1, 192, 312, {0x2A, 0xF4, 0x1D, 0x09, 0xB3, 0x08, 0xEB, 0x1E, 0x18, 0xC0, 0xF4, 0xC0, 0x02}}, }, /* 25 fps */ { {5, 703, 0, {0x29, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0xBF, 0x42, 0xC0, 0x02}}, {3, 447, 610, {0x29, 0xF4, 0x05, 0x13, 0x0B, 0x12, 0x43, 0x14, 0x18, 0xBF, 0x69, 0xC0, 0x02}}, {2, 292, 398, {0x29, 0xF4, 0x0D, 0x0C, 0x6C, 0x0B, 0xA4, 0x1E, 0x18, 0x24, 0xA1, 0xC0, 0x02}}, {1, 192, 262, {0x29, 0xF4, 0x25, 0x08, 0x23, 0x07, 0x5B, 0x1E, 0x18, 0xC0, 0xF4, 0xC0, 0x02}}, }, /* 30 fps */ { {8, 873, 0, {0x28, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x69, 0x37, 0xC0, 0x02}}, {5, 704, 774, {0x28, 0xF4, 0x05, 0x18, 0x21, 0x17, 0x59, 0x0F, 0x18, 0xC0, 0x42, 0xC0, 0x02}}, {3, 448, 492, {0x28, 0xF4, 0x05, 0x0F, 0x5D, 0x0E, 0x95, 0x15, 0x18, 0xC0, 0x69, 0xC0, 0x02}}, {2, 291, 320, {0x28, 0xF4, 0x1D, 0x09, 0xFB, 0x09, 0x33, 0x1E, 0x18, 0x23, 0xA1, 0xC0, 0x02}}, }, }, /* QCIF */ { /* 5 fps */ { {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}}, {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}}, {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}}, {1, 193, 0, {0x0D, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xC1, 0xF4, 0xC0, 0x02}}, }, /* 10 fps */ { {3, 385, 0, {0x0C, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x81, 0x79, 0xC0, 0x02}}, {2, 291, 800, {0x0C, 0xF4, 0x05, 0x18, 0xF4, 0x18, 0x18, 0x11, 0x08, 0x23, 0xA1, 0xC0, 0x02}}, {2, 291, 800, {0x0C, 0xF4, 0x05, 0x18, 0xF4, 0x18, 0x18, 0x11, 0x08, 0x23, 0xA1, 0xC0, 0x02}}, {1, 194, 532, {0x0C, 0xF4, 0x05, 0x10, 0x9A, 0x0F, 0xBE, 0x1B, 0x08, 0xC2, 0xF0, 0xC0, 0x02}}, }, /* 15 fps */ { {4, 577, 0, {0x0B, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x41, 0x52, 0xC0, 0x02}}, {3, 447, 818, {0x0B, 0xF4, 0x05, 0x19, 0x89, 0x18, 0xAD, 0x0F, 0x10, 0xBF, 0x69, 0xC0, 0x02}}, {2, 292, 534, {0x0B, 0xF4, 0x05, 0x10, 0xA3, 0x0F, 0xC7, 0x19, 
0x10, 0x24, 0xA1, 0xC0, 0x02}}, {1, 195, 356, {0x0B, 0xF4, 0x15, 0x0B, 0x11, 0x0A, 0x35, 0x1E, 0x10, 0xC3, 0xF0, 0xC0, 0x02}}, }, /* 20 fps */ { {6, 776, 0, {0x0A, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x08, 0x3F, 0xC0, 0x02}}, {4, 591, 804, {0x0A, 0xF4, 0x05, 0x19, 0x1E, 0x18, 0x42, 0x0F, 0x18, 0x4F, 0x4E, 0xC0, 0x02}}, {3, 447, 608, {0x0A, 0xF4, 0x05, 0x12, 0xFD, 0x12, 0x21, 0x15, 0x18, 0xBF, 0x69, 0xC0, 0x02}}, {2, 291, 396, {0x0A, 0xF4, 0x15, 0x0C, 0x5E, 0x0B, 0x82, 0x1E, 0x18, 0x23, 0xA1, 0xC0, 0x02}}, }, /* 25 fps */ { {9, 928, 0, {0x09, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0xA0, 0x33, 0xC0, 0x02}}, {5, 703, 800, {0x09, 0xF4, 0x05, 0x18, 0xF4, 0x18, 0x18, 0x10, 0x18, 0xBF, 0x42, 0xC0, 0x02}}, {3, 447, 508, {0x09, 0xF4, 0x0D, 0x0F, 0xD2, 0x0E, 0xF6, 0x1B, 0x18, 0xBF, 0x69, 0xC0, 0x02}}, {2, 292, 332, {0x09, 0xF4, 0x1D, 0x0A, 0x5A, 0x09, 0x7E, 0x1E, 0x18, 0x24, 0xA1, 0xC0, 0x02}}, }, /* 30 fps */ { {0, }, {9, 956, 876, {0x08, 0xF4, 0x05, 0x1B, 0x58, 0x1A, 0x7C, 0x0E, 0x20, 0xBC, 0x33, 0x10, 0x02}}, {4, 592, 542, {0x08, 0xF4, 0x05, 0x10, 0xE4, 0x10, 0x08, 0x17, 0x20, 0x50, 0x4E, 0x10, 0x02}}, {2, 291, 266, {0x08, 0xF4, 0x25, 0x08, 0x48, 0x07, 0x6C, 0x1E, 0x20, 0x23, 0xA1, 0x10, 0x02}}, }, }, /* SIF */ { /* 5 fps */ { {4, 582, 0, {0x35, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x46, 0x52, 0x60, 0x02}}, {3, 387, 1276, {0x35, 0xF4, 0x05, 0x27, 0xD8, 0x26, 0x48, 0x03, 0x10, 0x83, 0x79, 0x60, 0x02}}, {2, 291, 960, {0x35, 0xF4, 0x0D, 0x1D, 0xF2, 0x1C, 0x62, 0x04, 0x10, 0x23, 0xA1, 0x60, 0x02}}, {1, 191, 630, {0x35, 0xF4, 0x1D, 0x13, 0xA9, 0x12, 0x19, 0x05, 0x08, 0xBF, 0xF4, 0x60, 0x02}}, }, /* 10 fps */ { {0, }, {6, 775, 1278, {0x34, 0xF4, 0x05, 0x27, 0xE8, 0x26, 0x58, 0x05, 0x30, 0x07, 0x3F, 0x10, 0x02}}, {3, 447, 736, {0x34, 0xF4, 0x15, 0x16, 0xFB, 0x15, 0x6B, 0x05, 0x18, 0xBF, 0x69, 0x10, 0x02}}, {2, 291, 480, {0x34, 0xF4, 0x2D, 0x0E, 0xF9, 0x0D, 0x69, 0x09, 0x18, 0x23, 0xA1, 0x10, 0x02}}, }, /* 15 fps */ { {0, }, {9, 955, 1050, {0x33, 
0xF4, 0x05, 0x20, 0xCF, 0x1F, 0x3F, 0x06, 0x48, 0xBB, 0x33, 0x10, 0x02}}, {4, 591, 650, {0x33, 0xF4, 0x15, 0x14, 0x44, 0x12, 0xB4, 0x08, 0x30, 0x4F, 0x4E, 0x10, 0x02}}, {3, 448, 492, {0x33, 0xF4, 0x25, 0x0F, 0x52, 0x0D, 0xC2, 0x09, 0x28, 0xC0, 0x69, 0x10, 0x02}}, }, /* 20 fps */ { {0, }, {9, 958, 782, {0x32, 0xF4, 0x0D, 0x18, 0x6A, 0x16, 0xDA, 0x0B, 0x58, 0xBE, 0x33, 0xD0, 0x02}}, {5, 703, 574, {0x32, 0xF4, 0x1D, 0x11, 0xE7, 0x10, 0x57, 0x0B, 0x40, 0xBF, 0x42, 0xD0, 0x02}}, {3, 446, 364, {0x32, 0xF4, 0x3D, 0x0B, 0x5C, 0x09, 0xCC, 0x0E, 0x30, 0xBE, 0x69, 0xD0, 0x02}}, }, /* 25 fps */ { {0, }, {9, 958, 654, {0x31, 0xF4, 0x15, 0x14, 0x66, 0x12, 0xD6, 0x0B, 0x50, 0xBE, 0x33, 0x90, 0x02}}, {6, 776, 530, {0x31, 0xF4, 0x25, 0x10, 0x8C, 0x0E, 0xFC, 0x0C, 0x48, 0x08, 0x3F, 0x90, 0x02}}, {4, 592, 404, {0x31, 0xF4, 0x35, 0x0C, 0x96, 0x0B, 0x06, 0x0B, 0x38, 0x50, 0x4E, 0x90, 0x02}}, }, /* 30 fps */ { {0, }, {9, 957, 526, {0x30, 0xF4, 0x25, 0x10, 0x68, 0x0E, 0xD8, 0x0D, 0x58, 0xBD, 0x33, 0x60, 0x02}}, {6, 775, 426, {0x30, 0xF4, 0x35, 0x0D, 0x48, 0x0B, 0xB8, 0x0F, 0x50, 0x07, 0x3F, 0x60, 0x02}}, {4, 590, 324, {0x30, 0x7A, 0x4B, 0x0A, 0x1C, 0x08, 0xB4, 0x0E, 0x40, 0x4E, 0x52, 0x60, 0x02}}, }, }, /* CIF */ { /* 5 fps */ { {6, 771, 0, {0x15, 0xF4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x3F, 0x80, 0x02}}, {4, 465, 1278, {0x15, 0xF4, 0x05, 0x27, 0xEE, 0x26, 0x36, 0x03, 0x18, 0xD1, 0x65, 0x80, 0x02}}, {2, 291, 800, {0x15, 0xF4, 0x15, 0x18, 0xF4, 0x17, 0x3C, 0x05, 0x18, 0x23, 0xA1, 0x80, 0x02}}, {1, 193, 528, {0x15, 0xF4, 0x2D, 0x10, 0x7E, 0x0E, 0xC6, 0x0A, 0x18, 0xC1, 0xF4, 0x80, 0x02}}, }, /* 10 fps */ { {0, }, {9, 932, 1278, {0x14, 0xF4, 0x05, 0x27, 0xEE, 0x26, 0x36, 0x04, 0x30, 0xA4, 0x33, 0x10, 0x02}}, {4, 591, 812, {0x14, 0xF4, 0x15, 0x19, 0x56, 0x17, 0x9E, 0x06, 0x28, 0x4F, 0x4E, 0x10, 0x02}}, {2, 291, 400, {0x14, 0xF4, 0x3D, 0x0C, 0x7A, 0x0A, 0xC2, 0x0E, 0x28, 0x23, 0xA1, 0x10, 0x02}}, }, /* 15 fps */ { {0, }, {9, 956, 876, {0x13, 0xF4, 0x0D, 0x1B, 0x58, 0x19, 0xA0, 
0x05, 0x38, 0xBC, 0x33, 0x60, 0x02}}, {5, 703, 644, {0x13, 0xF4, 0x1D, 0x14, 0x1C, 0x12, 0x64, 0x08, 0x38, 0xBF, 0x42, 0x60, 0x02}}, {3, 448, 410, {0x13, 0xF4, 0x3D, 0x0C, 0xC4, 0x0B, 0x0C, 0x0E, 0x38, 0xC0, 0x69, 0x60, 0x02}}, }, /* 20 fps */ { {0, }, {9, 956, 650, {0x12, 0xF4, 0x1D, 0x14, 0x4A, 0x12, 0x92, 0x09, 0x48, 0xBC, 0x33, 0x10, 0x03}}, {6, 776, 528, {0x12, 0xF4, 0x2D, 0x10, 0x7E, 0x0E, 0xC6, 0x0A, 0x40, 0x08, 0x3F, 0x10, 0x03}}, {4, 591, 402, {0x12, 0xF4, 0x3D, 0x0C, 0x8F, 0x0A, 0xD7, 0x0E, 0x40, 0x4F, 0x4E, 0x10, 0x03}}, }, /* 25 fps */ { {0, }, {9, 956, 544, {0x11, 0xF4, 0x25, 0x10, 0xF4, 0x0F, 0x3C, 0x0A, 0x48, 0xBC, 0x33, 0xC0, 0x02}}, {7, 840, 478, {0x11, 0xF4, 0x2D, 0x0E, 0xEB, 0x0D, 0x33, 0x0B, 0x48, 0x48, 0x3B, 0xC0, 0x02}}, {5, 703, 400, {0x11, 0xF4, 0x3D, 0x0C, 0x7A, 0x0A, 0xC2, 0x0E, 0x48, 0xBF, 0x42, 0xC0, 0x02}}, }, /* 30 fps */ { {0, }, {9, 956, 438, {0x10, 0xF4, 0x35, 0x0D, 0xAC, 0x0B, 0xF4, 0x0D, 0x50, 0xBC, 0x33, 0x10, 0x02}}, {7, 838, 384, {0x10, 0xF4, 0x45, 0x0B, 0xFD, 0x0A, 0x45, 0x0F, 0x50, 0x46, 0x3B, 0x10, 0x02}}, {6, 773, 354, {0x10, 0x7A, 0x4B, 0x0B, 0x0C, 0x09, 0x80, 0x10, 0x50, 0x05, 0x3F, 0x10, 0x02}}, }, }, /* VGA */ { /* 5 fps */ { {0, }, {6, 773, 1272, {0x1D, 0xF4, 0x15, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x3F, 0x10, 0x02}}, {4, 592, 976, {0x1D, 0xF4, 0x25, 0x1E, 0x78, 0x1B, 0x58, 0x03, 0x30, 0x50, 0x4E, 0x10, 0x02}}, {3, 448, 738, {0x1D, 0xF4, 0x3D, 0x17, 0x0C, 0x13, 0xEC, 0x04, 0x30, 0xC0, 0x69, 0x10, 0x02}}, }, /* 10 fps */ { {0, }, {9, 956, 788, {0x1C, 0xF4, 0x35, 0x18, 0x9C, 0x15, 0x7C, 0x03, 0x48, 0xBC, 0x33, 0x10, 0x02}}, {6, 776, 640, {0x1C, 0x7A, 0x53, 0x13, 0xFC, 0x11, 0x2C, 0x04, 0x48, 0x08, 0x3F, 0x10, 0x02}}, {4, 592, 488, {0x1C, 0x7A, 0x6B, 0x0F, 0x3C, 0x0C, 0x6C, 0x06, 0x48, 0x50, 0x4E, 0x10, 0x02}}, }, /* 15 fps */ { {0, }, {9, 957, 526, {0x1B, 0x7A, 0x63, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x33, 0x80, 0x02}}, {9, 957, 526, {0x1B, 0x7A, 0x63, 0x10, 0x68, 0x0D, 0x98, 0x06, 0x58, 0xBD, 0x33, 0x80, 
0x02}}, {8, 895, 492, {0x1B, 0x7A, 0x6B, 0x0F, 0x5D, 0x0C, 0x8D, 0x06, 0x58, 0x7F, 0x37, 0x80, 0x02}}, }, /* 20 fps */ { {0, }, {0, }, {0, }, {0, }, }, /* 25 fps */ { {0, }, {0, }, {0, }, {0, }, }, /* 30 fps */ { {0, }, {0, }, {0, }, {0, }, }, }, }; /* * 16 versions: * 2 tables (one for Y, and one for U&V) * 16 levels of details per tables * 8 blocs */ const unsigned int TimonRomTable [16][2][16][8] = { { /* version 0 */ { /* version 0, passes 0 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000001}, {0x00000000,0x00000000,0x00000001,0x00000001, 0x00000001,0x00000001,0x00000001,0x00000001}, {0x00000000,0x00000000,0x00000001,0x00000001, 0x00000001,0x00000009,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000009,0x00000001, 0x00000009,0x00000009,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000009,0x00000049,0x00000009}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000009,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000249,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000249,0x00000249,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000249,0x0000124a,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000249, 0x00000249,0x0000124a,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009252,0x00009292,0x00001252}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 0, passes 1 */ 
{0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000001,0x00000001, 0x00000001,0x00000001,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000009,0x00000001, 0x00000001,0x00000009,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000009,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000009,0x00000001,0x00000000}, {0x00000000,0x00000000,0x00000049,0x00000009, 0x00000009,0x00000049,0x00000001,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000009, 0x00000009,0x00000049,0x00000001,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000009,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000009,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000009,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000249,0x00000049,0x00000009}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000249,0x00000049,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000049, 0x00000249,0x00000249,0x00000049,0x00000009}, {0x00000000,0x00000000,0x00001249,0x00000249, 0x0000124a,0x0000124a,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 1 */ { /* version 1, passes 0 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000001}, {0x00000000,0x00000000,0x00000001,0x00000001, 0x00000001,0x00000009,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000009,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000009,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000049}, 
{0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000249,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000249, 0x00000249,0x00000249,0x0000024a,0x00001252}, {0x00000000,0x00000000,0x00000049,0x00000249, 0x00000249,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00000049,0x00000249, 0x0000124a,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009252,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009252,0x00009252,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x0000924a, 0x00009292,0x00009493,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x0000a49b,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 1, passes 1 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000001,0x00000001,0x00000000}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000009,0x00000001,0x00000000}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000009,0x00000001,0x00000000}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000001,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000009,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000049, 0x00000049,0x00000249,0x00000009,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000049, 0x00000249,0x00000249,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x00000049,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x00000049,0x00000009}, 
{0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x00000049,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009252,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 2 */ { /* version 2, passes 0 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000001}, {0x00000000,0x00000000,0x00000009,0x00000009, 0x00000009,0x00000009,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000049,0x00000249, 0x00000249,0x00000249,0x0000024a,0x00001252}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009252,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009252,0x00009292,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009252,0x00009292,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00000249,0x0000924a, 0x00009252,0x00009493,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00000249,0x0000924a, 0x00009292,0x00009493,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00000249,0x00009252, 0x00009492,0x00009493,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x000124db,0x000124db,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000126dc,0x000126dc,0x000126dc}, 
{0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 2, passes 1 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000049,0x00000009, 0x00000049,0x00000009,0x00000001,0x00000000}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000000}, {0x00000000,0x00000000,0x00000249,0x00000049, 0x00000249,0x00000049,0x0000024a,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x0000024a,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x0000024a,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x0000024a,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x0000024a,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x0000024a,0x00000009}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00009252,0x00001252,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00009292,0x00001252,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00009292,0x00001252,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009292,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x0000924a,0x0000924a, 0x00009492,0x00009493,0x00009292,0x00001252}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 3 */ { /* version 3, passes 0 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000001}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000049,0x00000249, 0x00000249,0x00000249,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00000249,0x00000249, 
0x00000249,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x00009252,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009292,0x00009292,0x00009493}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009252,0x00009292,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009292,0x00009493,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00000249,0x00009252, 0x00009292,0x00009493,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009292,0x0000a49b,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x0000a49b,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x000124db,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x0000a49b,0x000124db,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0001249b,0x000126dc,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000136e4,0x0001b725,0x000136e4}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 3, passes 1 */ {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000}, {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000001,0x00000000}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x00000049,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x00001252,0x00000001}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x00001252,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009252,0x00009292,0x00000009}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00009252,0x00009292,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009252,0x00009292,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 
0x00009252,0x00009493,0x00009292,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009493,0x00009292,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009493,0x00009493,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009292,0x00009493,0x00009493,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x00009493,0x00009493,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x0000a49b,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x00009292, 0x0000a493,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 4 */ { /* version 4, passes 0 */ {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x00001252,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x00009252,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009292,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009252,0x00009493,0x00009493,0x0000a49b}, {0x00000000,0x00000000,0x00000249,0x0000924a, 0x00009292,0x00009493,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009292,0x00009493,0x0000a49b,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x0000a49b,0x0000a49b,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x000124db,0x000124db,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x000124db,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0001249b,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 
0x000124db,0x000136e4,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x00009252,0x000124db, 0x000126dc,0x0001b724,0x0001b725,0x0001b925}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 4, passes 1 */ {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00000249, 0x0000124a,0x0000124a,0x00001252,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x00009292,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009292,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x0000a49b,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x0000a49b,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x0000a49b,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x0000a49b,0x0000a49b,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x00009252,0x0000a49b, 0x0001249b,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 5 */ { /* version 5, passes 0 */ {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x0000124a,0x00001252,0x00009292}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x0000124a,0x00009292,0x00009292,0x00009493}, 
{0x00000000,0x00000000,0x00000249,0x0000924a, 0x00009292,0x00009493,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009292,0x00009493,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x0000a49b,0x0000a49b,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x000124db,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x000124db,0x000124db,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0001249b,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0001249b,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0001249b,0x000126dc,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000126dc,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x000136e4,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b724,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 5, passes 1 */ {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x00009493,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x00009493,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x0000a49b,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x00009493,0x000124db,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x00009493,0x000124db,0x00009292}, 
{0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x000124db,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x000124db,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009252,0x000124db, 0x000126dc,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 6 */ { /* version 6, passes 0 */ {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x0000124a,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x0000a49b,0x0000a49b,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x000124db,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0001249b,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000136e4,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000136e4,0x0001b725,0x0001b925}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x000136e4,0x0001b92d,0x0001b925}, 
{0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x000136e4,0x0001b925,0x00025bb6,0x00024b77}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 6, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00000249, 0x0000124a,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009292,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x00009493,0x0000a49b,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x000126dc,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x000126dc,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x000126dc,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00009492,0x0000a49b, 0x000136e4,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x0001b724,0x0001b724,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 7 */ { /* version 7, passes 0 */ {0x00000000,0x00000000,0x00001249,0x00001249, 
0x00009292,0x00009493,0x0000a49b,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x0000a49b,0x000124db,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0001249b,0x000126dc,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000136e4,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x0001b725,0x0001b925}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x0001b724,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x00009292,0x000124db, 0x000126dc,0x0001b724,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b724,0x0001c96e,0x0002496e}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x000136e4,0x0001b925,0x0001c96e,0x0002496e}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x0002496d,0x00025bb6,0x00025bbf}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 7, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009292,0x00009292}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x00009493,0x00009493,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 
0x0000a493,0x000124db,0x000136e4,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x000136e4,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000124db,0x000136e4,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x000136e4,0x0001b724,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00012492,0x000126db, 0x0001b724,0x0001b925,0x0001b725,0x000136e4}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 8 */ { /* version 8, passes 0 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009292,0x00009493,0x0000a49b,0x000124db}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x000124db,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x000136e4}, {0x00000000,0x00000000,0x00001249,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000136e4,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x0001b725,0x0001b925}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d}, {0x00000000,0x00000000,0x00009252,0x000124db, 0x000126dc,0x0001b724,0x0001b92d,0x0001c92d}, {0x00000000,0x00000000,0x00009292,0x000124db, 0x000126dc,0x0001b925,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x00009492,0x000124db, 
0x000136e4,0x0001b925,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b925,0x00024b76,0x00024b77}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x000136e4,0x0001b925,0x00024b76,0x00025bbf}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x000136e4,0x0001c92d,0x00024b76,0x00025bbf}, {0x00000000,0x00000000,0x00012492,0x000136db, 0x0001b724,0x00024b6d,0x0002ddb6,0x0002efff}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 8, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009493,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x0000a49b,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000126dc,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000136e4,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00009292,0x000124db, 0x000136e4,0x0001b724,0x0001b725,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x000136e4,0x0001b925,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x000136e4,0x0001b925,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x0002496d,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x00000000,0x00000000, 
0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 9 */ { /* version 9, passes 0 */ {0x00000000,0x00000000,0x00000049,0x00000049, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00000249,0x00000049, 0x00000249,0x00000249,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x0000124a,0x00009252,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009493,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009292,0x00009493,0x00009493,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000124db,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0001249b,0x000126dc,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00009252,0x00009493, 0x000124db,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00009252,0x0000a49b, 0x000124db,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x0000a49b, 0x000126dc,0x0001b724,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x000136e4,0x0001b925,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 9, passes 1 */ {0x00000000,0x00000000,0x00000249,0x00000049, 0x00000009,0x00000009,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000049,0x00000049,0x00000009,0x00000009}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00000249,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x00000049,0x00000049}, 
{0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x0000124a,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009252,0x0000124a,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x00009252,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x00009292,0x00009292,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x00009292,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x00009493,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009493,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000124db,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009252,0x000124db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 10 */ { /* version 10, passes 0 */ {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00000249,0x00001249, 0x00009252,0x00009292,0x00009292,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009292,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x00009493,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x000124db,0x000124db,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000124db,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0001249b,0x000126dc,0x000126dc,0x000124db}, 
{0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000126dc,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00009252,0x0000a49b, 0x000124db,0x000136e4,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x0000a49b, 0x000126dc,0x0001b724,0x0001b92d,0x0001b724}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000126dc,0x0001b925,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x000136e4,0x0002496d,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 10, passes 1 */ {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000049,0x00000049,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00000249,0x00000049,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x00009252,0x0000024a,0x00000049}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009493,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00009252, 0x00009492,0x00009493,0x00001252,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x00009493,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x00009492,0x00009493,0x00009292,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x00009493,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, 
{0x00000000,0x00000000,0x00009252,0x000126db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 11 */ { /* version 11, passes 0 */ {0x00000000,0x00000000,0x00000249,0x00000249, 0x00000249,0x00000249,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009292,0x00001252}, {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009492,0x0000a49b,0x0000a49b,0x00009292}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x000124db,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000126dc,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x0001b724,0x0001b725,0x000136e4}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000136e4,0x0001b724,0x0001b92d,0x0001b724}, {0x00000000,0x00000000,0x00009492,0x0000a49b, 0x000136e4,0x0001b724,0x0001b92d,0x0001b724}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b925,0x0001c96e,0x0001b925}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x0001b724,0x0001b925,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001c924,0x0002496d,0x00025bb6,0x00024b77}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 11, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00000249, 0x00000249,0x00000249,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x0000924a, 
0x00009252,0x00009252,0x0000024a,0x0000024a}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x0000a49b,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x0000a49b,0x00009292,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000124db,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000124db,0x00009493,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000126dc,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009292,0x000124db, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 12 */ { /* version 12, passes 0 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x0000a493,0x0000a49b,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x000126dc,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000126dc,0x0001b724,0x0001b725,0x000126dc}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 
0x000136e4,0x0001b724,0x0001b92d,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x0000a49b, 0x000136e4,0x0001b724,0x0001b92d,0x0001b724}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b724,0x0001b92d,0x0001b724}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b925,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x0001b724,0x0001b925,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x0001b724,0x0001c92d,0x0001c96e,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x0001b724,0x0001c92d,0x00024b76,0x0002496e}, {0x00000000,0x00000000,0x00012492,0x000126db, 0x0001c924,0x00024b6d,0x0002ddb6,0x00025bbf}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 12, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x00001249,0x00009292, 0x00009492,0x00009252,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x00009292,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x0000a49b,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000124db,0x00009493,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000124db,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000126dc,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x000136e4,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009492,0x000126db, 
0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00009492,0x000126db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0001b724,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 13 */ { /* version 13, passes 0 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x00009252,0x00009292,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x000124db,0x000126dc,0x00009493}, {0x00000000,0x00000000,0x00001249,0x0000a49b, 0x0001249b,0x000126dc,0x000126dc,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x000136e4,0x0001b725,0x000124db}, {0x00000000,0x00000000,0x00009292,0x0000a49b, 0x000136e4,0x0001b724,0x0001b725,0x000126dc}, {0x00000000,0x00000000,0x00009292,0x000124db, 0x000136e4,0x0001b724,0x0001b725,0x000126dc}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b724,0x0001c96e,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001c92d,0x0001c96e,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x000136e4,0x0001c92d,0x0001c96e,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x0001b724,0x0001c92d,0x0001c96e,0x0001b925}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b924,0x0001c92d,0x00024b76,0x0002496e}, {0x00000000,0x00000000,0x00012492,0x000136db, 0x00024924,0x00024b6d,0x0002ddb6,0x00025bbf}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 13, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x00001252,0x00001252}, 
{0x00000000,0x00000000,0x0000924a,0x00009493, 0x00009492,0x00009292,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x0000a49b,0x00009493,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000124db,0x00009493,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000136db, 0x0001b724,0x000124db,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000136db, 0x0001b724,0x000126dc,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00009292,0x000136db, 0x0001b724,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001b724,0x000126dc,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00012492,0x0001b6db, 0x0001c924,0x0001b724,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 14 */ { /* version 14, passes 0 */ {0x00000000,0x00000000,0x00001249,0x0000924a, 0x00009292,0x00009493,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00001249,0x0000a49b, 0x0000a493,0x000124db,0x000126dc,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x0000a49b}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x000136e4,0x0001b725,0x000124db}, {0x00000000,0x00000000,0x00009292,0x000124db, 0x000126dc,0x0001b724,0x0001b92d,0x000126dc}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b724,0x0001b92d,0x000126dc}, 
{0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001c92d,0x0001c96e,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x0001b724,0x0001c92d,0x0001c96e,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x0001b724,0x0001c92d,0x00024b76,0x0001b925}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b724,0x0001c92d,0x00024b76,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x0001c92d,0x00024b76,0x0002496e}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b924,0x0002496d,0x00024b76,0x00024b77}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b924,0x00024b6d,0x0002ddb6,0x00025bbf}, {0x00000000,0x00000000,0x00012492,0x0001b6db, 0x00024924,0x0002db6d,0x00036db6,0x0002efff}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 14, passes 1 */ {0x00000000,0x00000000,0x00001249,0x00001249, 0x0000124a,0x0000124a,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x00009493, 0x0000a493,0x00009292,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x0000a49b,0x00001252,0x00001252}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000136e4,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000136e4,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x000136e4,0x00009493,0x00009292}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001b724,0x000136e4,0x00009493,0x00009493}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001b724,0x000136e4,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001b724,0x000136e4,0x0000a49b,0x00009493}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001b724,0x000136e4,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x000136e4,0x000124db,0x0000a49b}, 
{0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x000136e4,0x000124db,0x0000a49b}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b724,0x000136e4,0x000126dc,0x000124db}, {0x00000000,0x00000000,0x00012492,0x0001b6db, 0x0001c924,0x0001b724,0x000136e4,0x000126dc}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } }, { /* version 15 */ { /* version 15, passes 0 */ {0x00000000,0x00000000,0x00001249,0x00009493, 0x0000a493,0x0000a49b,0x000124db,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0001249b,0x000126dc,0x000136e4,0x000124db}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x000126dc,0x0001b724,0x0001b725,0x000126dc}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x0001b724,0x0001b92d,0x000126dc}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x000136e4,0x0001b925,0x0001c96e,0x000136e4}, {0x00000000,0x00000000,0x00009492,0x000124db, 0x0001b724,0x0001c92d,0x0001c96e,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000124db, 0x0001b724,0x0001c92d,0x0001c96e,0x0001b724}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b724,0x0001c92d,0x0001c96e,0x0001b925}, {0x00000000,0x00000000,0x0000a492,0x000126db, 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b924,0x0001c92d,0x00024b76,0x0001c92d}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001b924,0x0002496d,0x00024b76,0x0002496e}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0002496d,0x00025bb6,0x00024b77}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x00024b6d,0x00025bb6,0x00024b77}, {0x00000000,0x00000000,0x00012492,0x000136db, 0x0001c924,0x00024b6d,0x0002ddb6,0x00025bbf}, {0x00000000,0x00000000,0x00012492,0x0001b6db, 0x00024924,0x0002db6d,0x00036db6,0x0002efff}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} }, { /* version 
15, passes 1 */ {0x00000000,0x00000000,0x0000924a,0x0000924a, 0x00009292,0x00009292,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x0000a49b, 0x0000a493,0x000124db,0x00009292,0x00009292}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000124db,0x0001b724,0x00009493,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000126dc,0x0001b724,0x00009493,0x00009493}, {0x00000000,0x00000000,0x0000924a,0x000124db, 0x000136e4,0x0001b724,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00009292,0x000136db, 0x0001b724,0x0001b724,0x0000a49b,0x0000a49b}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001c924,0x0001b724,0x000124db,0x000124db}, {0x00000000,0x00000000,0x00009492,0x000136db, 0x0001c924,0x0001b724,0x000124db,0x000124db}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0001b724,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0001b925,0x000126dc,0x000126dc}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0001b925,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0001b925,0x000136e4,0x000136e4}, {0x00000000,0x00000000,0x0000a492,0x000136db, 0x0001c924,0x0001b925,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x00012492,0x000136db, 0x0001c924,0x0001b925,0x0001b725,0x0001b724}, {0x00000000,0x00000000,0x00012492,0x0001b6db, 0x00024924,0x0002496d,0x0001b92d,0x0001b925}, {0x00000000,0x00000000,0x00000000,0x00000000, 0x00000000,0x00000000,0x00000000,0x00000000} } } };
gpl-2.0
AdrianHuang/rt-thread
components/external/freetype/src/gxvalid/gxvmorx2.c
147
11486
/***************************************************************************/
/*                                                                         */
/*  gxvmorx2.c                                                             */
/*                                                                         */
/*    TrueTypeGX/AAT morx table validation                                 */
/*    body for type2 (Ligature Substitution) subtable.                     */
/*                                                                         */
/*  Copyright 2005, 2013 by suzuki toshiya, Masatake YAMATO, Red Hat K.K., */
/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
/*                                                                         */
/*  This file is part of the FreeType project, and may only be used,       */
/*  modified, and distributed under the terms of the FreeType project      */
/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
/*  this file you indicate that you have read the license and              */
/*  understand and accept it fully.                                        */
/*                                                                         */
/***************************************************************************/

/***************************************************************************/
/*                                                                         */
/* gxvalid is derived from both gxlayout module and otvalid module.        */
/* Development of gxlayout is supported by the Information-technology      */
/* Promotion Agency(IPA), Japan.                                           */
/*                                                                         */
/***************************************************************************/


#include "gxvmorx.h"


  /*************************************************************************/
  /*                                                                       */
  /* The macro FT_COMPONENT is used in trace mode.  It is an implicit      */
  /* parameter of the FT_TRACE() and FT_ERROR() macros, used to print/log  */
  /* messages during execution.                                            */
  /*                                                                       */
#undef  FT_COMPONENT
#define FT_COMPONENT  trace_gxvmorx


  /* Per-subtable bookkeeping, hung off `gxvalid->xstatetable.optdata':   */
  /* the offsets (from the subtable start) of the three optional tables   */
  /* of a type2 (Ligature Substitution) subtable, plus their lengths as   */
  /* computed later by the subtable_setup callback.                       */
  typedef struct  GXV_morx_subtable_type2_StateOptRec_
  {
    FT_ULong  ligActionTable;
    FT_ULong  componentTable;
    FT_ULong  ligatureTable;
    FT_ULong  ligActionTable_length;
    FT_ULong  componentTable_length;
    FT_ULong  ligatureTable_length;

  }  GXV_morx_subtable_type2_StateOptRec,
    *GXV_morx_subtable_type2_StateOptRecData;


#define GXV_MORX_SUBTABLE_TYPE2_HEADER_SIZE \
          ( GXV_XSTATETABLE_HEADER_SIZE + 4 + 4 + 4 )


  /* Load the three 32-bit offsets that follow the extended state table   */
  /* header into `optdata'.  Invoked by gxv_XStateTable_validate() via    */
  /* the optdata_load_func hook.                                          */
  static void
  gxv_morx_subtable_type2_opttable_load( FT_Bytes       table,
                                         FT_Bytes       limit,
                                         GXV_Validator  gxvalid )
  {
    FT_Bytes  p = table;

    GXV_morx_subtable_type2_StateOptRecData  optdata =
      (GXV_morx_subtable_type2_StateOptRecData)gxvalid->xstatetable.optdata;


    GXV_LIMIT_CHECK( 4 + 4 + 4 );
    optdata->ligActionTable = FT_NEXT_ULONG( p );
    optdata->componentTable = FT_NEXT_ULONG( p );
    optdata->ligatureTable  = FT_NEXT_ULONG( p );

    GXV_TRACE(( "offset to ligActionTable=0x%08x\n",
                optdata->ligActionTable ));
    GXV_TRACE(( "offset to componentTable=0x%08x\n",
                optdata->componentTable ));
    GXV_TRACE(( "offset to ligatureTable=0x%08x\n",
                optdata->ligatureTable ));
  }


  /* Compute the length of each sub-block (class table, state array,     */
  /* entry table, and the three optional tables) from their start        */
  /* offsets and the total subtable size, via the shared helper.         */
  static void
  gxv_morx_subtable_type2_subtable_setup( FT_ULong       table_size,
                                          FT_ULong       classTable,
                                          FT_ULong       stateArray,
                                          FT_ULong       entryTable,
                                          FT_ULong*      classTable_length_p,
                                          FT_ULong*      stateArray_length_p,
                                          FT_ULong*      entryTable_length_p,
                                          GXV_Validator  gxvalid )
  {
    FT_ULong   o[6];
    FT_ULong*  l[6];
    FT_ULong   buff[7];

    GXV_morx_subtable_type2_StateOptRecData  optdata =
      (GXV_morx_subtable_type2_StateOptRecData)gxvalid->xstatetable.optdata;


    GXV_NAME_ENTER( "subtable boundaries setup" );

    /* o[i] and l[i] are parallel: offset in, length out. */
    o[0] = classTable;
    o[1] = stateArray;
    o[2] = entryTable;
    o[3] = optdata->ligActionTable;
    o[4] = optdata->componentTable;
    o[5] = optdata->ligatureTable;
    l[0] = classTable_length_p;
    l[1] = stateArray_length_p;
    l[2] = entryTable_length_p;
    l[3] = &(optdata->ligActionTable_length);
    l[4] = &(optdata->componentTable_length);
    l[5] = &(optdata->ligatureTable_length);

    gxv_set_length_by_ulong_offset( o, l, buff, 6, table_size, gxvalid );

    GXV_TRACE(( "classTable: offset=0x%08x length=0x%08x\n",
                classTable, *classTable_length_p ));
    GXV_TRACE(( "stateArray: offset=0x%08x length=0x%08x\n",
                stateArray, *stateArray_length_p ));
    GXV_TRACE(( "entryTable: offset=0x%08x length=0x%08x\n",
                entryTable, *entryTable_length_p ));
    GXV_TRACE(( "ligActionTable: offset=0x%08x length=0x%08x\n",
                optdata->ligActionTable,
                optdata->ligActionTable_length ));
    GXV_TRACE(( "componentTable: offset=0x%08x length=0x%08x\n",
                optdata->componentTable,
                optdata->componentTable_length ));
    GXV_TRACE(( "ligatureTable: offset=0x%08x length=0x%08x\n",
                optdata->ligatureTable,
                optdata->ligatureTable_length ));

    GXV_EXIT;
  }


#define GXV_MORX_LIGACTION_ENTRY_SIZE  4


  /* Validate the ligature action table entry selected by                */
  /* `ligActionIndex': check the entry lies inside the table, then check */
  /* that its 30-bit GID adjustment cannot move every glyph index        */
  /* outside the face's defined glyph range.                             */
  static void
  gxv_morx_subtable_type2_ligActionIndex_validate(
    FT_Bytes       table,
    FT_UShort      ligActionIndex,
    GXV_Validator  gxvalid )
  {
    /* access ligActionTable */
    GXV_morx_subtable_type2_StateOptRecData  optdata =
      (GXV_morx_subtable_type2_StateOptRecData)gxvalid->xstatetable.optdata;

    FT_Bytes  lat_base  = table + optdata->ligActionTable;
    FT_Bytes  p         = lat_base +
                          ligActionIndex * GXV_MORX_LIGACTION_ENTRY_SIZE;

    /* BUGFIX: the limit is the table base plus the table's *length*;    */
    /* the original added the table's own offset a second time, so the   */
    /* overrun check below compared against a meaningless address.       */
    FT_Bytes  lat_limit = lat_base + optdata->ligActionTable_length;


    if ( p < lat_base )
    {
      GXV_TRACE(( "p < lat_base (%d byte rewind)\n", lat_base - p ));
      FT_INVALID_OFFSET;
    }
    else if ( lat_limit < p )
    {
      GXV_TRACE(( "lat_limit < p (%d byte overrun)\n", p - lat_limit ));
      FT_INVALID_OFFSET;
    }

    {
      /* validate entry in ligActionTable */
      FT_ULong   lig_action;
#ifdef GXV_LOAD_UNUSED_VARS
      FT_UShort  last;
      FT_UShort  store;
#endif
      FT_ULong   offset;
      FT_Long    gid_limit;


      lig_action = FT_NEXT_ULONG( p );
#ifdef GXV_LOAD_UNUSED_VARS
      last  = (FT_UShort)( ( lig_action >> 31 ) & 1 );
      store = (FT_UShort)( ( lig_action >> 30 ) & 1 );
#endif

      offset = lig_action & 0x3FFFFFFFUL;

      /* this offset is 30-bit signed value to add to GID */
      /* it is different from the location offset in mort */
      if ( ( offset & 0x3FFF0000UL ) == 0x3FFF0000UL )
      {
        /* negative offset */
        gid_limit = gxvalid->face->num_glyphs -
                    ( offset & 0x0000FFFFUL );
        if ( gid_limit > 0 )
          return;

        GXV_TRACE(( "ligature action table includes"
                    " too negative offset moving all GID"
                    " below defined range: 0x%04x\n",
                    offset & 0xFFFFU ));
        GXV_SET_ERR_IF_PARANOID( FT_INVALID_OFFSET );
      }
      else if ( ( offset & 0x3FFF0000UL ) == 0x00000000UL )
      {
        /* positive offset */
        if ( (FT_Long)offset < gxvalid->face->num_glyphs )
          return;

        GXV_TRACE(( "ligature action table includes"
                    " too large offset moving all GID"
                    " over defined range: 0x%04x\n",
                    offset & 0xFFFFU ));
        GXV_SET_ERR_IF_PARANOID( FT_INVALID_OFFSET );
      }

      GXV_TRACE(( "ligature action table includes"
                  " invalid offset to add to 16-bit GID:"
                  " 0x%08x\n", offset ));
      GXV_SET_ERR_IF_PARANOID( FT_INVALID_OFFSET );
    }
  }


  /* Per-entry callback for the state machine: decode the flag bits and, */
  /* if a ligature action index is present, validate the referenced      */
  /* ligActionTable entry.                                               */
  static void
  gxv_morx_subtable_type2_entry_validate(
    FT_UShort                       state,
    FT_UShort                       flags,
    GXV_StateTable_GlyphOffsetCPtr  glyphOffset_p,
    FT_Bytes                        table,
    FT_Bytes                        limit,
    GXV_Validator                   gxvalid )
  {
#ifdef GXV_LOAD_UNUSED_VARS
    FT_UShort  setComponent;
    FT_UShort  dontAdvance;
    FT_UShort  performAction;
#endif
    FT_UShort  reserved;
    FT_UShort  ligActionIndex;

    FT_UNUSED( state );
    FT_UNUSED( limit );


#ifdef GXV_LOAD_UNUSED_VARS
    setComponent  = (FT_UShort)( ( flags >> 15 ) & 1 );
    dontAdvance   = (FT_UShort)( ( flags >> 14 ) & 1 );
    performAction = (FT_UShort)( ( flags >> 13 ) & 1 );
#endif

    reserved       = (FT_UShort)( flags & 0x1FFF );
    ligActionIndex = glyphOffset_p->u;

    if ( reserved > 0 )
      /* BUGFIX: mask 0x1FFF covers the low 13 bits (bits 15-13 are the  */
      /* defined flags); the message used to claim "14bit".              */
      GXV_TRACE(( " reserved 13bit is non-zero\n" ));

    if ( 0 < ligActionIndex )
      gxv_morx_subtable_type2_ligActionIndex_validate( table,
                                                       ligActionIndex,
                                                       gxvalid );
  }


  /* Scan the ligature table (an array of 16-bit glyph IDs) and flag any */
  /* ID outside the face's defined range.  Apple publishes no format     */
  /* specification for this table, so only the GID range is checked.     */
  static void
  gxv_morx_subtable_type2_ligatureTable_validate( FT_Bytes       table,
                                                  GXV_Validator  gxvalid )
  {
    GXV_morx_subtable_type2_StateOptRecData  optdata =
      (GXV_morx_subtable_type2_StateOptRecData)gxvalid->xstatetable.optdata;

    FT_Bytes  p     = table + optdata->ligatureTable;
    FT_Bytes  limit = table + optdata->ligatureTable
                            + optdata->ligatureTable_length;


    GXV_NAME_ENTER( "morx chain subtable type2 - substitutionTable" );

    if ( 0 != optdata->ligatureTable )
    {
      /* Apple does not give specification of ligatureTable format */
      while ( p < limit )
      {
        FT_UShort  lig_gid;


        GXV_LIMIT_CHECK( 2 );
        lig_gid = FT_NEXT_USHORT( p );

        /* BUGFIX: report a glyph index *outside* the defined range; the */
        /* original condition was inverted (`<') and reported every      */
        /* valid GID as FT_INVALID_GLYPH_ID in paranoid mode.            */
        if ( lig_gid >= gxvalid->face->num_glyphs )
          GXV_SET_ERR_IF_PARANOID( FT_INVALID_GLYPH_ID );
      }
    }

    GXV_EXIT;
  }


  /* Entry point: validate a morx type2 (Ligature Substitution)          */
  /* subtable.  Installs the type2-specific hooks into                   */
  /* gxvalid->xstatetable, runs the extended state table validator, then */
  /* checks the ligature table.                                          */
  FT_LOCAL_DEF( void )
  gxv_morx_subtable_type2_validate( FT_Bytes       table,
                                    FT_Bytes       limit,
                                    GXV_Validator  gxvalid )
  {
    FT_Bytes  p = table;

    GXV_morx_subtable_type2_StateOptRec  lig_rec;


    GXV_NAME_ENTER( "morx chain subtable type2 (Ligature Substitution)" );

    GXV_LIMIT_CHECK( GXV_MORX_SUBTABLE_TYPE2_HEADER_SIZE );

    gxvalid->xstatetable.optdata =
      &lig_rec;
    gxvalid->xstatetable.optdata_load_func =
      gxv_morx_subtable_type2_opttable_load;
    gxvalid->xstatetable.subtable_setup_func =
      gxv_morx_subtable_type2_subtable_setup;
    gxvalid->xstatetable.entry_glyphoffset_fmt =
      GXV_GLYPHOFFSET_USHORT;
    gxvalid->xstatetable.entry_validate_func =
      gxv_morx_subtable_type2_entry_validate;

    gxv_XStateTable_validate( p, limit, gxvalid );

#if 0
    p += gxvalid->subtable_length;
#endif
    gxv_morx_subtable_type2_ligatureTable_validate( table, gxvalid );

    GXV_EXIT;
  }


/* END */
gpl-2.0
drakaz/gaosp_kernel
drivers/watchdog/ks8695_wdt.c
147
7551
/* * Watchdog driver for Kendin/Micrel KS8695. * * (C) 2007 Andrew Victor * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/watchdog.h> #include <linux/io.h> #include <linux/uaccess.h> #include <mach/timex.h> #include <mach/regs-timer.h> #define WDT_DEFAULT_TIME 5 /* seconds */ #define WDT_MAX_TIME 171 /* seconds */ static int wdt_time = WDT_DEFAULT_TIME; static int nowayout = WATCHDOG_NOWAYOUT; module_param(wdt_time, int, 0); MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default=" __MODULE_STRING(WDT_DEFAULT_TIME) ")"); #ifdef CONFIG_WATCHDOG_NOWAYOUT module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #endif static unsigned long ks8695wdt_busy; static spinlock_t ks8695_lock; /* ......................................................................... */ /* * Disable the watchdog. */ static inline void ks8695_wdt_stop(void) { unsigned long tmcon; spin_lock(&ks8695_lock); /* disable timer0 */ tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); spin_unlock(&ks8695_lock); } /* * Enable and reset the watchdog. 
*/ static inline void ks8695_wdt_start(void) { unsigned long tmcon; unsigned long tval = wdt_time * CLOCK_TICK_RATE; spin_lock(&ks8695_lock); /* disable timer0 */ tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); /* program timer0 */ __raw_writel(tval | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC); /* re-enable timer0 */ tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); spin_unlock(&ks8695_lock); } /* * Reload the watchdog timer. (ie, pat the watchdog) */ static inline void ks8695_wdt_reload(void) { unsigned long tmcon; spin_lock(&ks8695_lock); /* disable, then re-enable timer0 */ tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON); __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON); spin_unlock(&ks8695_lock); } /* * Change the watchdog time interval. */ static int ks8695_wdt_settimeout(int new_time) { /* * All counting occurs at SLOW_CLOCK / 128 = 0.256 Hz * * Since WDV is a 16-bit counter, the maximum period is * 65536 / 0.256 = 256 seconds. */ if ((new_time <= 0) || (new_time > WDT_MAX_TIME)) return -EINVAL; /* Set new watchdog time. It will be used when ks8695_wdt_start() is called. */ wdt_time = new_time; return 0; } /* ......................................................................... */ /* * Watchdog device is opened, and watchdog starts running. */ static int ks8695_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &ks8695wdt_busy)) return -EBUSY; ks8695_wdt_start(); return nonseekable_open(inode, file); } /* * Close the watchdog device. * If CONFIG_WATCHDOG_NOWAYOUT is NOT defined then the watchdog is also * disabled. 
*/ static int ks8695_wdt_close(struct inode *inode, struct file *file) { /* Disable the watchdog when file is closed */ if (!nowayout) ks8695_wdt_stop(); clear_bit(0, &ks8695wdt_busy); return 0; } static struct watchdog_info ks8695_wdt_info = { .identity = "ks8695 watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, }; /* * Handle commands from user-space. */ static long ks8695_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; int new_value; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ks8695_wdt_info, sizeof(ks8695_wdt_info)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: if (get_user(new_value, p)) return -EFAULT; if (new_value & WDIOS_DISABLECARD) ks8695_wdt_stop(); if (new_value & WDIOS_ENABLECARD) ks8695_wdt_start(); return 0; case WDIOC_KEEPALIVE: ks8695_wdt_reload(); /* pat the watchdog */ return 0; case WDIOC_SETTIMEOUT: if (get_user(new_value, p)) return -EFAULT; if (ks8695_wdt_settimeout(new_value)) return -EINVAL; /* Enable new time value */ ks8695_wdt_start(); /* Return current value */ return put_user(wdt_time, p); case WDIOC_GETTIMEOUT: return put_user(wdt_time, p); default: return -ENOTTY; } } /* * Pat the watchdog whenever device is written to. */ static ssize_t ks8695_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { ks8695_wdt_reload(); /* pat the watchdog */ return len; } /* ......................................................................... 
*/ static const struct file_operations ks8695wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = ks8695_wdt_ioctl, .open = ks8695_wdt_open, .release = ks8695_wdt_close, .write = ks8695_wdt_write, }; static struct miscdevice ks8695wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &ks8695wdt_fops, }; static int __init ks8695wdt_probe(struct platform_device *pdev) { int res; if (ks8695wdt_miscdev.parent) return -EBUSY; ks8695wdt_miscdev.parent = &pdev->dev; res = misc_register(&ks8695wdt_miscdev); if (res) return res; printk(KERN_INFO "KS8695 Watchdog Timer enabled (%d seconds%s)\n", wdt_time, nowayout ? ", nowayout" : ""); return 0; } static int __exit ks8695wdt_remove(struct platform_device *pdev) { int res; res = misc_deregister(&ks8695wdt_miscdev); if (!res) ks8695wdt_miscdev.parent = NULL; return res; } static void ks8695wdt_shutdown(struct platform_device *pdev) { ks8695_wdt_stop(); } #ifdef CONFIG_PM static int ks8695wdt_suspend(struct platform_device *pdev, pm_message_t message) { ks8695_wdt_stop(); return 0; } static int ks8695wdt_resume(struct platform_device *pdev) { if (ks8695wdt_busy) ks8695_wdt_start(); return 0; } #else #define ks8695wdt_suspend NULL #define ks8695wdt_resume NULL #endif static struct platform_driver ks8695wdt_driver = { .probe = ks8695wdt_probe, .remove = __exit_p(ks8695wdt_remove), .shutdown = ks8695wdt_shutdown, .suspend = ks8695wdt_suspend, .resume = ks8695wdt_resume, .driver = { .name = "ks8695_wdt", .owner = THIS_MODULE, }, }; static int __init ks8695_wdt_init(void) { spin_lock_init(&ks8695_lock); /* Check that the heartbeat value is within range; if not reset to the default */ if (ks8695_wdt_settimeout(wdt_time)) { ks8695_wdt_settimeout(WDT_DEFAULT_TIME); pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n", wdt_time, WDT_MAX_TIME); } return platform_driver_register(&ks8695wdt_driver); } static void __exit ks8695_wdt_exit(void) { 
platform_driver_unregister(&ks8695wdt_driver); } module_init(ks8695_wdt_init); module_exit(ks8695_wdt_exit); MODULE_AUTHOR("Andrew Victor"); MODULE_DESCRIPTION("Watchdog driver for KS8695"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:ks8695_wdt");
gpl-2.0
HackerOO7/android_kernel_huawei_u8951
arch/arm/mach-msm/msm_bus/msm_bus_dbg.c
403
18720
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/hrtimer.h> #include <mach/msm_bus_board.h> #include <mach/msm_bus.h> #include "msm_bus_core.h" #define MAX_BUFF_SIZE 4096 #define FILL_LIMIT 128 static struct dentry *clients; static struct dentry *dir; static DEFINE_MUTEX(msm_bus_dbg_fablist_lock); struct msm_bus_dbg_state { uint32_t cl; uint8_t enable; uint8_t current_index; } clstate; struct msm_bus_cldata { const struct msm_bus_scale_pdata *pdata; int index; uint32_t clid; int size; struct dentry *file; struct list_head list; char buffer[MAX_BUFF_SIZE]; }; struct msm_bus_fab_list { const char *name; int size; struct dentry *file; struct list_head list; char buffer[MAX_BUFF_SIZE]; }; LIST_HEAD(fabdata_list); LIST_HEAD(cl_list); /** * The following structures and funtions are used for * the test-client which can be created at run-time. 
*/ static struct msm_bus_vectors init_vectors[1]; static struct msm_bus_vectors current_vectors[1]; static struct msm_bus_vectors requested_vectors[1]; static struct msm_bus_paths shell_client_usecases[] = { { .num_paths = ARRAY_SIZE(init_vectors), .vectors = init_vectors, }, { .num_paths = ARRAY_SIZE(current_vectors), .vectors = current_vectors, }, { .num_paths = ARRAY_SIZE(requested_vectors), .vectors = requested_vectors, }, }; static struct msm_bus_scale_pdata shell_client = { .usecase = shell_client_usecases, .num_usecases = ARRAY_SIZE(shell_client_usecases), .name = "test-client", }; static void msm_bus_dbg_init_vectors(void) { init_vectors[0].src = -1; init_vectors[0].dst = -1; init_vectors[0].ab = 0; init_vectors[0].ib = 0; current_vectors[0].src = -1; current_vectors[0].dst = -1; current_vectors[0].ab = 0; current_vectors[0].ib = 0; requested_vectors[0].src = -1; requested_vectors[0].dst = -1; requested_vectors[0].ab = 0; requested_vectors[0].ib = 0; clstate.enable = 0; clstate.current_index = 0; } static int msm_bus_dbg_update_cl_request(uint32_t cl) { int ret = 0; if (clstate.current_index < 2) clstate.current_index = 2; else { clstate.current_index = 1; current_vectors[0].ab = requested_vectors[0].ab; current_vectors[0].ib = requested_vectors[0].ib; } if (clstate.enable) { MSM_BUS_DBG("Updating request for shell client, index: %d\n", clstate.current_index); ret = msm_bus_scale_client_update_request(clstate.cl, clstate.current_index); } else MSM_BUS_DBG("Enable bit not set. Skipping update request\n"); return ret; } static void msm_bus_dbg_unregister_client(uint32_t cl) { MSM_BUS_DBG("Unregistering shell client\n"); msm_bus_scale_unregister_client(clstate.cl); clstate.cl = 0; } static uint32_t msm_bus_dbg_register_client(void) { int ret = 0; if (init_vectors[0].src != requested_vectors[0].src) { MSM_BUS_DBG("Shell client master changed. 
Unregistering\n"); msm_bus_dbg_unregister_client(clstate.cl); } if (init_vectors[0].dst != requested_vectors[0].dst) { MSM_BUS_DBG("Shell client slave changed. Unregistering\n"); msm_bus_dbg_unregister_client(clstate.cl); } if (!clstate.enable) { MSM_BUS_DBG("Enable bit not set, skipping registration: cl " "%d\n", clstate.cl); return 0; } if (clstate.cl) { MSM_BUS_DBG("Client registered, skipping registration\n"); return 0; } current_vectors[0].src = init_vectors[0].src; requested_vectors[0].src = init_vectors[0].src; current_vectors[0].dst = init_vectors[0].dst; requested_vectors[0].dst = init_vectors[0].dst; MSM_BUS_DBG("Registering shell client\n"); ret = msm_bus_scale_register_client(&shell_client); return ret; } static int msm_bus_dbg_mas_get(void *data, u64 *val) { *val = init_vectors[0].src; MSM_BUS_DBG("Get master: %llu\n", *val); return 0; } static int msm_bus_dbg_mas_set(void *data, u64 val) { init_vectors[0].src = val; MSM_BUS_DBG("Set master: %llu\n", val); clstate.cl = msm_bus_dbg_register_client(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get, msm_bus_dbg_mas_set, "%llu\n"); static int msm_bus_dbg_slv_get(void *data, u64 *val) { *val = init_vectors[0].dst; MSM_BUS_DBG("Get slave: %llu\n", *val); return 0; } static int msm_bus_dbg_slv_set(void *data, u64 val) { init_vectors[0].dst = val; MSM_BUS_DBG("Set slave: %llu\n", val); clstate.cl = msm_bus_dbg_register_client(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get, msm_bus_dbg_slv_set, "%llu\n"); static int msm_bus_dbg_ab_get(void *data, u64 *val) { *val = requested_vectors[0].ab; MSM_BUS_DBG("Get ab: %llu\n", *val); return 0; } static int msm_bus_dbg_ab_set(void *data, u64 val) { requested_vectors[0].ab = val; MSM_BUS_DBG("Set ab: %llu\n", val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get, msm_bus_dbg_ab_set, "%llu\n"); static int msm_bus_dbg_ib_get(void *data, u64 *val) { *val = 
requested_vectors[0].ib; MSM_BUS_DBG("Get ib: %llu\n", *val); return 0; } static int msm_bus_dbg_ib_set(void *data, u64 val) { requested_vectors[0].ib = val; MSM_BUS_DBG("Set ib: %llu\n", val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get, msm_bus_dbg_ib_set, "%llu\n"); static int msm_bus_dbg_en_get(void *data, u64 *val) { *val = clstate.enable; MSM_BUS_DBG("Get enable: %llu\n", *val); return 0; } static int msm_bus_dbg_en_set(void *data, u64 val) { int ret = 0; clstate.enable = val; if (clstate.enable) { if (!clstate.cl) { MSM_BUS_DBG("client: %u\n", clstate.cl); clstate.cl = msm_bus_dbg_register_client(); if (clstate.cl) ret = msm_bus_dbg_update_cl_request(clstate.cl); } else { MSM_BUS_DBG("update request for cl: %u\n", clstate.cl); ret = msm_bus_dbg_update_cl_request(clstate.cl); } } MSM_BUS_DBG("Set enable: %llu\n", val); return ret; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get, msm_bus_dbg_en_set, "%llu\n"); /** * The following funtions are used for viewing the client data * and changing the client request at run-time */ static ssize_t client_data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int bsize = 0; uint32_t cl = (uint32_t)file->private_data; struct msm_bus_cldata *cldata = NULL; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == cl) break; } bsize = cldata->size; return simple_read_from_buffer(buf, count, ppos, cldata->buffer, bsize); } static int client_data_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations client_data_fops = { .open = client_data_open, .read = client_data_read, }; struct dentry *msm_bus_dbg_create(const char *name, mode_t mode, struct dentry *dent, uint32_t clid) { if (dent == NULL) { MSM_BUS_DBG("debugfs not ready yet\n"); return NULL; } return debugfs_create_file(name, mode, dent, (void *)clid, &client_data_fops); } static int 
msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata, int index, uint32_t clid, struct dentry *file) { struct msm_bus_cldata *cldata; cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL); if (!cldata) { MSM_BUS_DBG("Failed to allocate memory for client data\n"); return -ENOMEM; } cldata->pdata = pdata; cldata->index = index; cldata->clid = clid; cldata->file = file; cldata->size = 0; list_add_tail(&cldata->list, &cl_list); return 0; } static void msm_bus_dbg_free_client(uint32_t clid) { struct msm_bus_cldata *cldata = NULL; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == clid) { debugfs_remove(cldata->file); list_del(&cldata->list); kfree(cldata); break; } } } static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata, int index, uint32_t clid) { int i = 0, j; char *buf = NULL; struct msm_bus_cldata *cldata = NULL; struct timespec ts; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == clid) break; } if (cldata->file == NULL) { if (pdata->name == NULL) { MSM_BUS_DBG("Client doesn't have a name\n"); return -EINVAL; } cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO, clients, clid); } if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT)) i = cldata->size; else { i = 0; cldata->size = 0; } buf = cldata->buffer; ts = ktime_to_timespec(ktime_get()); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", (int)ts.tv_sec, (int)ts.tv_nsec); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr : %d\n", index); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->usecase[index].vectors[j].src); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->usecase[index].vectors[j].dst); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += 
scnprintf(buf + i, MAX_BUFF_SIZE - i, "%u ", pdata->usecase[index].vectors[j].ab); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%u ", pdata->usecase[index].vectors[j].ib); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); cldata->size = i; return i; } static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index) { int ret = 0; if ((index < 0) || (index > cldata->pdata->num_usecases)) { MSM_BUS_DBG("Invalid index!\n"); return -EINVAL; } ret = msm_bus_scale_client_update_request(cldata->clid, index); return ret; } static ssize_t msm_bus_dbg_update_request_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct msm_bus_cldata *cldata; unsigned long index = 0; int ret = 0; char *chid; char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL); if (!buf || IS_ERR(buf)) { MSM_BUS_ERR("Memory allocation for buffer failed\n"); return -ENOMEM; } if (cnt == 0) return 0; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = '\0'; chid = buf; MSM_BUS_DBG("buffer: %s\n size: %d\n", buf, sizeof(ubuf)); list_for_each_entry(cldata, &cl_list, list) { if (strstr(chid, cldata->pdata->name)) { cldata = cldata; strsep(&chid, " "); if (chid) { ret = strict_strtoul(chid, 10, &index); if (ret) { MSM_BUS_DBG("Index conversion" " failed\n"); return -EFAULT; } } else MSM_BUS_DBG("Error parsing input. 
Index not" " found\n"); break; } } msm_bus_dbg_update_request(cldata, index); kfree(buf); return cnt; } /** * The following funtions are used for viewing the commit data * for each fabric */ static ssize_t fabric_data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct msm_bus_fab_list *fablist = NULL; int bsize = 0; ssize_t ret; const char *name = file->private_data; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, name) == 0) break; } bsize = fablist->size; ret = simple_read_from_buffer(buf, count, ppos, fablist->buffer, bsize); mutex_unlock(&msm_bus_dbg_fablist_lock); return ret; } static const struct file_operations fabric_data_fops = { .open = client_data_open, .read = fabric_data_read, }; static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file) { struct msm_bus_fab_list *fablist; int ret = 0; mutex_lock(&msm_bus_dbg_fablist_lock); fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL); if (!fablist) { MSM_BUS_DBG("Failed to allocate memory for commit data\n"); ret = -ENOMEM; goto err; } fablist->name = fabname; fablist->size = 0; list_add_tail(&fablist->list, &fabdata_list); err: mutex_unlock(&msm_bus_dbg_fablist_lock); return ret; } static void msm_bus_dbg_free_fabric(const char *fabname) { struct msm_bus_fab_list *fablist = NULL; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, fabname) == 0) { debugfs_remove(fablist->file); list_del(&fablist->list); kfree(fablist); break; } } mutex_unlock(&msm_bus_dbg_fablist_lock); } static int msm_bus_dbg_fill_fab_buffer(const char *fabname, void *cdata, int nmasters, int nslaves, int ntslaves) { int i; char *buf = NULL; struct msm_bus_fab_list *fablist = NULL; struct timespec ts; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, fabname) == 0) break; } if 
(fablist->file == NULL) { MSM_BUS_DBG("Fabric dbg entry does not exist\n"); mutex_unlock(&msm_bus_dbg_fablist_lock); return -EFAULT; } if (fablist->size < MAX_BUFF_SIZE - 256) i = fablist->size; else { i = 0; fablist->size = 0; } buf = fablist->buffer; mutex_unlock(&msm_bus_dbg_fablist_lock); ts = ktime_to_timespec(ktime_get()); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", (int)ts.tv_sec, (int)ts.tv_nsec); msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata, nmasters, nslaves, ntslaves); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); mutex_lock(&msm_bus_dbg_fablist_lock); fablist->size = i; mutex_unlock(&msm_bus_dbg_fablist_lock); return 0; } static const struct file_operations msm_bus_dbg_update_request_fops = { .open = client_data_open, .write = msm_bus_dbg_update_request_write, }; /** * msm_bus_dbg_client_data() - Add debug data for clients * @pdata: Platform data of the client * @index: The current index or operation to be performed * @clid: Client handle obtained during registration */ void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, uint32_t clid) { struct dentry *file = NULL; if (index == MSM_BUS_DBG_REGISTER) { msm_bus_dbg_record_client(pdata, index, clid, file); if (!pdata->name) { MSM_BUS_DBG("Cannot create debugfs entry. 
Null name\n"); return; } } else if (index == MSM_BUS_DBG_UNREGISTER) { msm_bus_dbg_free_client(clid); MSM_BUS_DBG("Client %d unregistered\n", clid); } else msm_bus_dbg_fill_cl_buffer(pdata, index, clid); } EXPORT_SYMBOL(msm_bus_dbg_client_data); /** * msm_bus_dbg_commit_data() - Add commit data from fabrics * @fabname: Fabric name specified in platform data * @cdata: Commit Data * @nmasters: Number of masters attached to fabric * @nslaves: Number of slaves attached to fabric * @ntslaves: Number of tiered slaves attached to fabric * @op: Operation to be performed */ void msm_bus_dbg_commit_data(const char *fabname, void *cdata, int nmasters, int nslaves, int ntslaves, int op) { struct dentry *file = NULL; if (op == MSM_BUS_DBG_REGISTER) msm_bus_dbg_record_fabric(fabname, file); else if (op == MSM_BUS_DBG_UNREGISTER) msm_bus_dbg_free_fabric(fabname); else msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters, nslaves, ntslaves); } EXPORT_SYMBOL(msm_bus_dbg_commit_data); static int __init msm_bus_debugfs_init(void) { struct dentry *commit, *shell_client; struct msm_bus_fab_list *fablist; struct msm_bus_cldata *cldata = NULL; uint64_t val = 0; dir = debugfs_create_dir("msm-bus-dbg", NULL); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create msm-bus-dbg\n"); goto err; } clients = debugfs_create_dir("client-data", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create clients\n"); goto err; } shell_client = debugfs_create_dir("shell-client", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create clients\n"); goto err; } commit = debugfs_create_dir("commit-data", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create commit\n"); goto err; } if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_en_fops) == NULL) goto err; if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_ib_fops) == NULL) goto err; if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val, 
&shell_client_ab_fops) == NULL) goto err; if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_slv_fops) == NULL) goto err; if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_mas_fops) == NULL) goto err; if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR, clients, NULL, &msm_bus_dbg_update_request_fops) == NULL) goto err; list_for_each_entry(cldata, &cl_list, list) { if (cldata->pdata->name == NULL) { MSM_BUS_DBG("Client name not found\n"); continue; } cldata->file = msm_bus_dbg_create(cldata-> pdata->name, S_IRUGO, clients, cldata->clid); } mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { fablist->file = debugfs_create_file(fablist->name, S_IRUGO, commit, (void *)fablist->name, &fabric_data_fops); if (fablist->file == NULL) { MSM_BUS_DBG("Cannot create files for commit data\n"); goto err; } } mutex_unlock(&msm_bus_dbg_fablist_lock); msm_bus_dbg_init_vectors(); return 0; err: debugfs_remove_recursive(dir); return -ENODEV; } late_initcall(msm_bus_debugfs_init); static void __exit msm_bus_dbg_teardown(void) { struct msm_bus_fab_list *fablist = NULL, *fablist_temp; struct msm_bus_cldata *cldata = NULL, *cldata_temp; debugfs_remove_recursive(dir); list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) { list_del(&cldata->list); kfree(cldata); } mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) { list_del(&fablist->list); kfree(fablist); } mutex_unlock(&msm_bus_dbg_fablist_lock); } module_exit(msm_bus_dbg_teardown); MODULE_DESCRIPTION("Debugfs for msm bus scaling client"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>");
gpl-2.0
chtq/ucore_lab
labcodes/lab4/libs/printfmt.c
403
9735
#include <defs.h> #include <x86.h> #include <error.h> #include <stdio.h> #include <string.h> /* * * Space or zero padding and a field width are supported for the numeric * formats only. * * The special format %e takes an integer error code * and prints a string describing the error. * The integer may be positive or negative, * so that -E_NO_MEM and E_NO_MEM are equivalent. * */ static const char * const error_string[MAXERROR + 1] = { [0] NULL, [E_UNSPECIFIED] "unspecified error", [E_BAD_PROC] "bad process", [E_INVAL] "invalid parameter", [E_NO_MEM] "out of memory", [E_NO_FREE_PROC] "out of processes", [E_FAULT] "segmentation fault", }; /* * * printnum - print a number (base <= 16) in reverse order * @putch: specified putch function, print a single character * @putdat: used by @putch function * @num: the number will be printed * @base: base for print, must be in [1, 16] * @width: maximum number of digits, if the actual width is less than @width, use @padc instead * @padc: character that padded on the left if the actual width is less than @width * */ static void printnum(void (*putch)(int, void*), void *putdat, unsigned long long num, unsigned base, int width, int padc) { unsigned long long result = num; unsigned mod = do_div(result, base); // first recursively print all preceding (more significant) digits if (num >= base) { printnum(putch, putdat, result, base, width - 1, padc); } else { // print any needed pad characters before first digit while (-- width > 0) putch(padc, putdat); } // then print this (the least significant) digit putch("0123456789abcdef"[mod], putdat); } /* * * getuint - get an unsigned int of various possible sizes from a varargs list * @ap: a varargs list pointer * @lflag: determines the size of the vararg that @ap points to * */ static unsigned long long getuint(va_list *ap, int lflag) { if (lflag >= 2) { return va_arg(*ap, unsigned long long); } else if (lflag) { return va_arg(*ap, unsigned long); } else { return va_arg(*ap, unsigned int); } } 
/* * * getint - same as getuint but signed, we can't use getuint because of sign extension * @ap: a varargs list pointer * @lflag: determines the size of the vararg that @ap points to * */ static long long getint(va_list *ap, int lflag) { if (lflag >= 2) { return va_arg(*ap, long long); } else if (lflag) { return va_arg(*ap, long); } else { return va_arg(*ap, int); } } /* * * printfmt - format a string and print it by using putch * @putch: specified putch function, print a single character * @putdat: used by @putch function * @fmt: the format string to use * */ void printfmt(void (*putch)(int, void*), void *putdat, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vprintfmt(putch, putdat, fmt, ap); va_end(ap); } /* * * vprintfmt - format a string and print it by using putch, it's called with a va_list * instead of a variable number of arguments * @putch: specified putch function, print a single character * @putdat: used by @putch function * @fmt: the format string to use * @ap: arguments for the format string * * Call this function if you are already dealing with a va_list. * Or you probably want printfmt() instead. * */ void vprintfmt(void (*putch)(int, void*), void *putdat, const char *fmt, va_list ap) { register const char *p; register int ch, err; unsigned long long num; int base, width, precision, lflag, altflag; while (1) { while ((ch = *(unsigned char *)fmt ++) != '%') { if (ch == '\0') { return; } putch(ch, putdat); } // Process a %-escape sequence char padc = ' '; width = precision = -1; lflag = altflag = 0; reswitch: switch (ch = *(unsigned char *)fmt ++) { // flag to pad on the right case '-': padc = '-'; goto reswitch; // flag to pad with 0's instead of spaces case '0': padc = '0'; goto reswitch; // width field case '1' ... 
'9': for (precision = 0; ; ++ fmt) { precision = precision * 10 + ch - '0'; ch = *fmt; if (ch < '0' || ch > '9') { break; } } goto process_precision; case '*': precision = va_arg(ap, int); goto process_precision; case '.': if (width < 0) width = 0; goto reswitch; case '#': altflag = 1; goto reswitch; process_precision: if (width < 0) width = precision, precision = -1; goto reswitch; // long flag (doubled for long long) case 'l': lflag ++; goto reswitch; // character case 'c': putch(va_arg(ap, int), putdat); break; // error message case 'e': err = va_arg(ap, int); if (err < 0) { err = -err; } if (err > MAXERROR || (p = error_string[err]) == NULL) { printfmt(putch, putdat, "error %d", err); } else { printfmt(putch, putdat, "%s", p); } break; // string case 's': if ((p = va_arg(ap, char *)) == NULL) { p = "(null)"; } if (width > 0 && padc != '-') { for (width -= strnlen(p, precision); width > 0; width --) { putch(padc, putdat); } } for (; (ch = *p ++) != '\0' && (precision < 0 || -- precision >= 0); width --) { if (altflag && (ch < ' ' || ch > '~')) { putch('?', putdat); } else { putch(ch, putdat); } } for (; width > 0; width --) { putch(' ', putdat); } break; // (signed) decimal case 'd': num = getint(&ap, lflag); if ((long long)num < 0) { putch('-', putdat); num = -(long long)num; } base = 10; goto number; // unsigned decimal case 'u': num = getuint(&ap, lflag); base = 10; goto number; // (unsigned) octal case 'o': num = getuint(&ap, lflag); base = 8; goto number; // pointer case 'p': putch('0', putdat); putch('x', putdat); num = (unsigned long long)(uintptr_t)va_arg(ap, void *); base = 16; goto number; // (unsigned) hexadecimal case 'x': num = getuint(&ap, lflag); base = 16; number: printnum(putch, putdat, num, base, width, padc); break; // escaped '%' character case '%': putch(ch, putdat); break; // unrecognized escape sequence - just print it literally default: putch('%', putdat); for (fmt --; fmt[-1] != '%'; fmt --) /* do nothing */; break; } } } /* sprintbuf is 
used to save enough information of a buffer */ struct sprintbuf { char *buf; // address pointer points to the first unused memory char *ebuf; // points the end of the buffer int cnt; // the number of characters that have been placed in this buffer }; /* * * sprintputch - 'print' a single character in a buffer * @ch: the character will be printed * @b: the buffer to place the character @ch * */ static void sprintputch(int ch, struct sprintbuf *b) { b->cnt ++; if (b->buf < b->ebuf) { *b->buf ++ = ch; } } /* * * snprintf - format a string and place it in a buffer * @str: the buffer to place the result into * @size: the size of buffer, including the trailing null space * @fmt: the format string to use * */ int snprintf(char *str, size_t size, const char *fmt, ...) { va_list ap; int cnt; va_start(ap, fmt); cnt = vsnprintf(str, size, fmt, ap); va_end(ap); return cnt; } /* * * vsnprintf - format a string and place it in a buffer, it's called with a va_list * instead of a variable number of arguments * @str: the buffer to place the result into * @size: the size of buffer, including the trailing null space * @fmt: the format string to use * @ap: arguments for the format string * * The return value is the number of characters which would be generated for the * given input, excluding the trailing '\0'. * * Call this function if you are already dealing with a va_list. * Or you probably want snprintf() instead. * */ int vsnprintf(char *str, size_t size, const char *fmt, va_list ap) { struct sprintbuf b = {str, str + size - 1, 0}; if (str == NULL || b.buf > b.ebuf) { return -E_INVAL; } // print the string to the buffer vprintfmt((void*)sprintputch, &b, fmt, ap); // null terminate the buffer *b.buf = '\0'; return b.cnt; }
gpl-2.0
kbc-developers/android_kernel_samsung_klte
lib/genalloc.c
1427
11970
/* * Basic general purpose allocator for managing special purpose * memory, for example, memory that is not managed by the regular * kmalloc/kfree interface. Uses for this includes on-device special * memory, uncached memory etc. * * It is safe to use the allocator in NMI handlers and other special * unblockable contexts that could otherwise deadlock on locks. This * is implemented by using atomic operations and retries on any * conflicts. The disadvantage is that there may be livelocks in * extreme cases. For better scalability, one allocator can be used * for each CPU. * * The lockless operation only works if there is enough memory * available. If new memory is added to the pool a lock has to be * still taken. So any user relying on locklessness has to ensure * that sufficient memory is preallocated. * * The basic atomic operation of this allocator is cmpxchg on long. * On architectures that don't have NMI-safe cmpxchg implementation, * the allocator can NOT be used in NMI handler. So code uses the * allocator in NMI handler should depend on * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ #include <linux/slab.h> #include <linux/export.h> #include <linux/bitmap.h> #include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/genalloc.h> #include <linux/vmalloc.h> static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set) { unsigned long val, nval; nval = *addr; do { val = nval; if (val & mask_to_set) return -EBUSY; cpu_relax(); } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val); return 0; } static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) { unsigned long val, nval; nval = *addr; do { val = nval; if ((val & mask_to_clear) != mask_to_clear) return -EBUSY; cpu_relax(); } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val); return 0; } /* * bitmap_set_ll - set the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Set @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. If two * users set the same bit, one user will return remain bits, otherwise * return 0. */ static int bitmap_set_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); const int size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); while (nr - bits_to_set >= 0) { if (set_bits_ll(p, mask_to_set)) return nr; nr -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } if (nr) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); if (set_bits_ll(p, mask_to_set)) return nr; } return 0; } /* * bitmap_clear_ll - clear the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Clear @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. 
If two * users clear the same bit, one user will return remain bits, * otherwise return 0. */ static int bitmap_clear_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); const int size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); while (nr - bits_to_clear >= 0) { if (clear_bits_ll(p, mask_to_clear)) return nr; nr -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } if (nr) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); if (clear_bits_ll(p, mask_to_clear)) return nr; } return 0; } /** * gen_pool_create - create a new special memory pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents * @nid: node id of the node the pool structure should be allocated on, or -1 * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. */ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) { struct gen_pool *pool; pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); if (pool != NULL) { spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); pool->min_alloc_order = min_alloc_order; } return pool; } EXPORT_SYMBOL(gen_pool_create); /** * gen_pool_add_virt - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @virt: virtual starting address of memory chunk to add to pool * @phys: physical starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * * Add a new chunk of special memory to the specified pool. * * Returns 0 on success or a -ve errno on failure. 
*/ int gen_pool_add_virt(struct gen_pool *pool, u64 virt, phys_addr_t phys, size_t size, int nid) { struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; int nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); if (nbytes <= PAGE_SIZE) chunk = kmalloc_node(nbytes, __GFP_ZERO, nid); else chunk = vmalloc(nbytes); if (unlikely(chunk == NULL)) return -ENOMEM; if (nbytes > PAGE_SIZE) memset(chunk, 0, nbytes); chunk->phys_addr = phys; chunk->start_addr = virt; chunk->end_addr = virt + size; atomic_set(&chunk->avail, size); spin_lock(&pool->lock); list_add_rcu(&chunk->next_chunk, &pool->chunks); spin_unlock(&pool->lock); return 0; } EXPORT_SYMBOL(gen_pool_add_virt); /** * gen_pool_virt_to_phys - return the physical address of memory * @pool: pool to allocate from * @addr: starting address of memory * * Returns the physical address on success, or -1 on error. */ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, u64 addr) { struct gen_pool_chunk *chunk; phys_addr_t paddr = -1; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr < chunk->end_addr) { paddr = chunk->phys_addr + (addr - chunk->start_addr); break; } } rcu_read_unlock(); return paddr; } EXPORT_SYMBOL(gen_pool_virt_to_phys); /** * gen_pool_destroy - destroy a special memory pool * @pool: pool to destroy * * Destroy the specified special memory pool. Verifies that there are no * outstanding allocations. 
*/ void gen_pool_destroy(struct gen_pool *pool) { struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { int nbytes; chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); list_del(&chunk->next_chunk); end_bit = (chunk->end_addr - chunk->start_addr) >> order; nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(end_bit) * sizeof(long); bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); if (nbytes <= PAGE_SIZE) kfree(chunk); else vfree(chunk); } kfree(pool); return; } EXPORT_SYMBOL(gen_pool_destroy); /** * gen_pool_alloc_aligned - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @alignment_order: Order the allocated space should be * aligned to (eg. 20 means allocated space * must be aligned to 1MiB). * * Allocate the requested number of bytes from the specified pool. * Uses a first-fit algorithm. Can not be used in NMI handler on * architectures without NMI-safe cmpxchg implementation. 
*/ u64 gen_pool_alloc_aligned(struct gen_pool *pool, size_t size, unsigned alignment_order) { struct gen_pool_chunk *chunk; u64 addr = 0, align_mask = 0; int order = pool->min_alloc_order; int nbits, start_bit = 0, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif if (size == 0) return 0; if (alignment_order > order) align_mask = (1 << (alignment_order - order)) - 1; nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { unsigned long chunk_size; if (size > atomic_read(&chunk->avail)) continue; chunk_size = (chunk->end_addr - chunk->start_addr) >> order; retry: start_bit = bitmap_find_next_zero_area_off(chunk->bits, chunk_size, 0, nbits, align_mask, chunk->start_addr >> order); if (start_bit >= chunk_size) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); if (remain) { remain = bitmap_clear_ll(chunk->bits, start_bit, nbits - remain); BUG_ON(remain); goto retry; } addr = chunk->start_addr + ((u64)start_bit << order); size = nbits << pool->min_alloc_order; atomic_sub(size, &chunk->avail); break; } rcu_read_unlock(); return addr; } EXPORT_SYMBOL(gen_pool_alloc_aligned); /** * gen_pool_free - free allocated special memory back to the pool * @pool: pool to free to * @addr: starting address of memory to free back to pool * @size: size in bytes of memory to free * * Free previously allocated special memory back to the specified * pool. Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. 
*/ void gen_pool_free(struct gen_pool *pool, u64 addr, size_t size) { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int start_bit, nbits, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr < chunk->end_addr) { BUG_ON(addr + size > chunk->end_addr); start_bit = (addr - chunk->start_addr) >> order; remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); BUG_ON(remain); size = nbits << order; atomic_add(size, &chunk->avail); rcu_read_unlock(); return; } } rcu_read_unlock(); BUG(); } EXPORT_SYMBOL(gen_pool_free); /** * gen_pool_for_each_chunk - call func for every chunk of generic memory pool * @pool: the generic memory pool * @func: func to call * @data: additional data used by @func * * Call @func for every chunk of generic memory pool. The @func is * called with rcu_read_lock held. */ void gen_pool_for_each_chunk(struct gen_pool *pool, void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), void *data) { struct gen_pool_chunk *chunk; rcu_read_lock(); list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) func(pool, chunk, data); rcu_read_unlock(); } EXPORT_SYMBOL(gen_pool_for_each_chunk); /** * gen_pool_avail - get available free space of the pool * @pool: pool to get available free space * * Return available free space of the specified pool. */ size_t gen_pool_avail(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t avail = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) avail += atomic_read(&chunk->avail); rcu_read_unlock(); return avail; } EXPORT_SYMBOL_GPL(gen_pool_avail); /** * gen_pool_size - get size in bytes of memory managed by the pool * @pool: pool to get size * * Return size in bytes of memory managed by the pool. 
*/ size_t gen_pool_size(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t size = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) size += chunk->end_addr - chunk->start_addr; rcu_read_unlock(); return size; } EXPORT_SYMBOL_GPL(gen_pool_size);
gpl-2.0
Dinjesk/android_kernel_oneplus_msm8996
drivers/net/wireless/b43/phy_lcn.c
1427
24637
/*

  Broadcom B43 wireless driver
  IEEE 802.11n LCN-PHY support

  Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
  Boston, MA 02110-1301, USA.

  This file incorporates work covered by the following copyright and
  permission notice:

  Copyright (c) 2010 Broadcom Corporation

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

*/

#include <linux/slab.h>

#include "b43.h"
#include "phy_lcn.h"
#include "tables_phy_lcn.h"
#include "main.h"

/* One TX gain setting for the LCN-PHY analog chain.
 * Field meanings follow the vendor wlc_lcnphy code: mixer (gm), PGA, PAD
 * and DAC gain stages — exact units are undocumented here. */
struct lcn_tx_gains {
	u16 gm_gain;
	u16 pga_gain;
	u16 pad_gain;
	u16 dac_gain;
};

/* A 16-tap TX IIR filter configuration, selected by @type id. */
struct lcn_tx_iir_filter {
	u8 type;
	u16 values[16];
};

/* What the on-chip auxiliary ADC measurement path should sense. */
enum lcn_sense_type {
	B43_SENSE_TEMP,
	B43_SENSE_VBAT,
};

/**************************************************
 * Radio 2064.
 **************************************************/

/* wlc_lcnphy_radio_2064_channel_tune_4313 */
/* Per-channel tuning of the 2064 radio. Register values are taken from
 * the vendor driver; their individual semantics are undocumented. */
static void b43_radio_2064_channel_setup(struct b43_wldev *dev)
{
	u16 save[2];

	b43_radio_set(dev, 0x09d, 0x4);
	b43_radio_write(dev, 0x09e, 0xf);

	/* Channel specific values in theory, in practice always the same */
	b43_radio_write(dev, 0x02a, 0xb);
	b43_radio_maskset(dev, 0x030, ~0x3, 0xa);
	b43_radio_maskset(dev, 0x091, ~0x3, 0);
	b43_radio_maskset(dev, 0x038, ~0xf, 0x7);
	b43_radio_maskset(dev, 0x030, ~0xc, 0x8);
	b43_radio_maskset(dev, 0x05e, ~0xf, 0x8);
	b43_radio_maskset(dev, 0x05e, ~0xf0, 0x80);
	b43_radio_write(dev, 0x06c, 0x80);

	/* preserve 0x044/0x12b around the calibration sequence below */
	save[0] = b43_radio_read(dev, 0x044);
	save[1] = b43_radio_read(dev, 0x12b);

	b43_radio_set(dev, 0x044, 0x7);
	b43_radio_set(dev, 0x12b, 0xe);

	/* TODO */

	b43_radio_write(dev, 0x040, 0xfb);

	b43_radio_write(dev, 0x041, 0x9a);
	b43_radio_write(dev, 0x042, 0xa3);
	b43_radio_write(dev, 0x043, 0x0c);

	/* TODO */
	b43_radio_set(dev, 0x044, 0x0c);
	udelay(1);

	b43_radio_write(dev, 0x044, save[0]);
	b43_radio_write(dev, 0x12b, save[1]);

	if (dev->phy.rev == 1) {
		/* brcmsmac uses outdated 0x3 for 0x038 */
		b43_radio_write(dev, 0x038, 0x0);
		b43_radio_write(dev, 0x091, 0x7);
	}
}

/* wlc_radio_2064_init */
/* One-time bring-up of the 2064 radio after PHY init. */
static void b43_radio_2064_init(struct b43_wldev *dev)
{
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		b43_radio_write(dev, 0x09c, 0x0020);
		b43_radio_write(dev, 0x105, 0x0008);
	} else {
		/* TODO */
	}
	b43_radio_write(dev, 0x032, 0x0062);
	b43_radio_write(dev, 0x033, 0x0019);
	b43_radio_write(dev, 0x090, 0x0010);
	b43_radio_write(dev, 0x010, 0x0000);
	if (dev->phy.rev == 1) {
		b43_radio_write(dev, 0x060, 0x007f);
		b43_radio_write(dev, 0x061, 0x0072);
		b43_radio_write(dev, 0x062, 0x007f);
	}
	b43_radio_write(dev, 0x01d, 0x0002);
	b43_radio_write(dev, 0x01e, 0x0006);

	b43_phy_write(dev, 0x4ea, 0x4688);
	b43_phy_maskset(dev, 0x4eb, ~0x7, 0x2);
	b43_phy_mask(dev, 0x4eb, ~0x01c0);
	b43_phy_maskset(dev, 0x46a, 0xff00, 0x19);

	b43_lcntab_write(dev, B43_LCNTAB16(0x00, 0x55), 0);

	b43_radio_mask(dev, 0x05b, (u16) ~0xff02);
	b43_radio_set(dev, 0x004, 0x40);
	b43_radio_set(dev, 0x120, 0x10);
	b43_radio_set(dev, 0x078, 0x80);
	b43_radio_set(dev, 0x129, 0x2);
	b43_radio_set(dev, 0x057, 0x1);
	b43_radio_set(dev, 0x05b, 0x2);

	/* TODO: wait for some bit to be set */
	b43_radio_read(dev, 0x05c);

	b43_radio_mask(dev, 0x05b, (u16) ~0xff02);
	b43_radio_mask(dev, 0x057, (u16) ~0xff01);

	b43_phy_write(dev, 0x933, 0x2d6b);
	b43_phy_write(dev, 0x934, 0x2d6b);
	b43_phy_write(dev, 0x935, 0x2d6b);
	b43_phy_write(dev, 0x936, 0x2d6b);
	b43_phy_write(dev, 0x937, 0x016b);

	b43_radio_mask(dev, 0x057, (u16) ~0xff02);
	b43_radio_write(dev, 0x0c2, 0x006f);
}

/**************************************************
 * Various PHY ops
 **************************************************/

/* wlc_lcnphy_toggle_afe_pwdn */
/* Pulse the AFE power-down bits and restore the previous control values. */
static void b43_phy_lcn_afe_set_unset(struct b43_wldev *dev)
{
	u16 afe_ctl2 = b43_phy_read(dev, B43_PHY_LCN_AFE_CTL2);
	u16 afe_ctl1 = b43_phy_read(dev, B43_PHY_LCN_AFE_CTL1);

	b43_phy_write(dev, B43_PHY_LCN_AFE_CTL2, afe_ctl2 | 0x1);
	b43_phy_write(dev, B43_PHY_LCN_AFE_CTL1, afe_ctl1 | 0x1);

	b43_phy_write(dev, B43_PHY_LCN_AFE_CTL2, afe_ctl2 & ~0x1);
	b43_phy_write(dev, B43_PHY_LCN_AFE_CTL1, afe_ctl1 & ~0x1);

	b43_phy_write(dev, B43_PHY_LCN_AFE_CTL2, afe_ctl2);
	b43_phy_write(dev, B43_PHY_LCN_AFE_CTL1, afe_ctl1);
}

/* wlc_lcnphy_get_pa_gain */
/* Read the current PA gain field (bits 14:8 of PHY reg 0x4fb). */
static u16 b43_phy_lcn_get_pa_gain(struct b43_wldev *dev)
{
	return (b43_phy_read(dev, 0x4fb) & 0x7f00) >> 8;
}

/* wlc_lcnphy_set_dac_gain */
/* Program the DAC gain field (bits 11:7 of PHY reg 0x439). */
static void b43_phy_lcn_set_dac_gain(struct b43_wldev *dev, u16 dac_gain)
{
	u16 dac_ctrl;

	dac_ctrl = b43_phy_read(dev, 0x439);
	dac_ctrl = dac_ctrl & 0xc7f;
	dac_ctrl = dac_ctrl | (dac_gain << 7);
	b43_phy_maskset(dev, 0x439, ~0xfff, dac_ctrl);
}

/* wlc_lcnphy_set_bbmult */
/* Store the baseband multiplier in the LCN table (high byte of the entry). */
static void b43_phy_lcn_set_bbmult(struct b43_wldev *dev, u8 m0)
{
	b43_lcntab_write(dev, B43_LCNTAB16(0x00, 0x57), m0 << 8);
}

/* wlc_lcnphy_clear_tx_power_offsets */
/* Zero the per-rate TX power offset tables. */
static void b43_phy_lcn_clear_tx_power_offsets(struct b43_wldev *dev)
{
	u8 i;

	if (1) { /* FIXME */
		/* zero 30 entries starting at table offset 0x340 */
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, (0x7 << 10) | 0x340);
		for (i = 0; i < 30; i++) {
			b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, 0);
			b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, 0);
		}
	}
	/* zero 64 entries starting at table offset 0x80 */
	b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, (0x7 << 10) | 0x80);
	for (i = 0; i < 64; i++) {
		b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, 0);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, 0);
	}
}

/* wlc_lcnphy_rev0_baseband_init */
/* Baseband register defaults for PHY revision 0 hardware. */
static void b43_phy_lcn_rev0_baseband_init(struct b43_wldev *dev)
{
	b43_radio_write(dev, 0x11c, 0);

	b43_phy_write(dev, 0x43b, 0);
	b43_phy_write(dev, 0x43c, 0);
	b43_phy_write(dev, 0x44c, 0);
	b43_phy_write(dev, 0x4e6, 0);
	b43_phy_write(dev, 0x4f9, 0);
	b43_phy_write(dev, 0x4b0, 0);
	b43_phy_write(dev, 0x938, 0);
	b43_phy_write(dev, 0x4b0, 0);
	b43_phy_write(dev, 0x44e, 0);

	b43_phy_set(dev, 0x567, 0x03);

	b43_phy_set(dev, 0x44a, 0x44);
	b43_phy_write(dev, 0x44a, 0x80);

	if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_FEM))
		; /* TODO */
	b43_phy_maskset(dev, 0x634, ~0xff, 0xc);
	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_FEM) {
		/* boards with an external front-end module use a lower value */
		b43_phy_maskset(dev, 0x634, ~0xff, 0xa);
		b43_phy_write(dev, 0x910, 0x1);
	}
	b43_phy_write(dev, 0x910, 0x1);

	b43_phy_maskset(dev, 0x448, ~0x300, 0x100);
	b43_phy_maskset(dev, 0x608, ~0xff, 0x17);
	b43_phy_maskset(dev, 0x604, ~0x7ff, 0x3ea);
}

/* wlc_lcnphy_bu_tweaks */
/* Assorted bring-up tweaks ported from the vendor driver. */
static void b43_phy_lcn_bu_tweaks(struct b43_wldev *dev)
{
	b43_phy_set(dev, 0x805, 0x1);

	b43_phy_maskset(dev, 0x42f, ~0x7, 0x3);
	b43_phy_maskset(dev, 0x030, ~0x7, 0x3);

	b43_phy_write(dev, 0x414, 0x1e10);
	b43_phy_write(dev, 0x415, 0x0640);

	b43_phy_maskset(dev, 0x4df, (u16) ~0xff00, 0xf700);

	b43_phy_set(dev, 0x44a, 0x44);
	b43_phy_write(dev, 0x44a, 0x80);

	b43_phy_maskset(dev, 0x434, ~0xff, 0xfd);
	b43_phy_maskset(dev, 0x420, ~0xff, 0x10);

	if (dev->dev->bus_sprom->board_rev >= 0x1204)
		b43_radio_set(dev, 0x09b, 0xf0);

	b43_phy_write(dev, 0x7d6, 0x0902);

	b43_phy_maskset(dev, 0x429, ~0xf, 0x9);
	b43_phy_maskset(dev, 0x429, ~(0x3f << 4), 0xe << 4);

	if (dev->phy.rev == 1) {
		b43_phy_maskset(dev, 0x423, ~0xff, 0x46);
		b43_phy_maskset(dev, 0x411, ~0xff, 1);
		b43_phy_set(dev, 0x434, 0xff); /* FIXME: update to wl */

		/* TODO: wl operates on PHY 0x416, brcmsmac is outdated here */
		b43_phy_maskset(dev, 0x656, ~0xf, 2);

		b43_phy_set(dev, 0x44d, 4);

		b43_radio_set(dev, 0x0f7, 0x4);
		b43_radio_mask(dev, 0x0f1, ~0x3);
		b43_radio_maskset(dev, 0x0f2, ~0xf8, 0x90);
		b43_radio_maskset(dev, 0x0f3, ~0x3, 0x2);
		b43_radio_maskset(dev, 0x0f3, ~0xf0, 0xa0);

		b43_radio_set(dev, 0x11f, 0x2);

		b43_phy_lcn_clear_tx_power_offsets(dev);

		/* TODO: something more? */
	}
}

/* wlc_lcnphy_vbat_temp_sense_setup */
/* Route the aux ADC to the temperature or battery-voltage sensor, run one
 * dummy transmission to latch a sample, then restore all touched state. */
static void b43_phy_lcn_sense_setup(struct b43_wldev *dev,
				    enum lcn_sense_type sense_type)
{
	u8 auxpga_vmidcourse, auxpga_vmidfine, auxpga_gain;
	u16 auxpga_vmid;
	u8 tx_pwr_idx;
	u8 i;
	/* save/restore pairs: [0] = register, [1] = saved value */
	u16 save_radio_regs[6][2] = {
		{ 0x007, 0 }, { 0x0ff, 0 }, { 0x11f, 0 }, { 0x005, 0 },
		{ 0x025, 0 }, { 0x112, 0 },
	};
	u16 save_phy_regs[14][2] = {
		{ 0x503, 0 }, { 0x4a4, 0 }, { 0x4d0, 0 }, { 0x4d9, 0 },
		{ 0x4da, 0 }, { 0x4a6, 0 }, { 0x938, 0 }, { 0x939, 0 },
		{ 0x4d8, 0 }, { 0x4d0, 0 }, { 0x4d7, 0 }, { 0x4a5, 0 },
		{ 0x40d, 0 }, { 0x4a2, 0 },
	};
	u16 save_radio_4a4;

	msleep(1);

	/* Save */
	for (i = 0; i < 6; i++)
		save_radio_regs[i][1] = b43_radio_read(dev,
						save_radio_regs[i][0]);
	for (i = 0; i < 14; i++)
		save_phy_regs[i][1] = b43_phy_read(dev, save_phy_regs[i][0]);
	b43_mac_suspend(dev);
	save_radio_4a4 = b43_radio_read(dev, 0x4a4);
	/* wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); */
	tx_pwr_idx = dev->phy.lcn->tx_pwr_curr_idx;

	/* Setup */
	/* TODO: wlc_lcnphy_set_tx_pwr_by_index(pi, 127); */
	b43_radio_set(dev, 0x007, 0x1);
	b43_radio_set(dev, 0x0ff, 0x10);
	b43_radio_set(dev, 0x11f, 0x4);

	b43_phy_mask(dev, 0x503, ~0x1);
	b43_phy_mask(dev, 0x503, ~0x4);
	b43_phy_mask(dev, 0x4a4, ~0x4000);
	b43_phy_mask(dev, 0x4a4, (u16) ~0x8000);
	b43_phy_mask(dev, 0x4d0, ~0x20);
	b43_phy_set(dev, 0x4a5, 0xff);
	b43_phy_maskset(dev, 0x4a5, ~0x7000, 0x5000);
	b43_phy_mask(dev, 0x4a5, ~0x700);
	b43_phy_maskset(dev, 0x40d, ~0xff, 64);
	b43_phy_maskset(dev, 0x40d, ~0x700, 0x600);
	b43_phy_maskset(dev, 0x4a2, ~0xff, 64);
	b43_phy_maskset(dev, 0x4a2, ~0x700, 0x600);
	b43_phy_maskset(dev, 0x4d9, ~0x70, 0x20);
	b43_phy_maskset(dev, 0x4d9, ~0x700, 0x300);
	b43_phy_maskset(dev, 0x4d9, ~0x7000, 0x1000);
	b43_phy_mask(dev, 0x4da, ~0x1000);
	b43_phy_set(dev, 0x4da, 0x2000);
	b43_phy_set(dev, 0x4a6, 0x8000);

	b43_radio_write(dev, 0x025, 0xc);
	b43_radio_set(dev, 0x005, 0x8);
	b43_phy_set(dev, 0x938, 0x4);
	b43_phy_set(dev, 0x939, 0x4);
	b43_phy_set(dev, 0x4a4, 0x1000);

	/* FIXME: don't hardcode */
	b43_lcntab_write(dev, B43_LCNTAB16(0x8, 0x6), 0x640);

	/* pick AUX PGA mid-voltage and gain for the chosen sensor */
	switch (sense_type) {
	case B43_SENSE_TEMP:
		b43_phy_set(dev, 0x4d7, 0x8);
		b43_phy_maskset(dev, 0x4d7, ~0x7000, 0x1000);
		auxpga_vmidcourse = 8;
		auxpga_vmidfine = 0x4;
		auxpga_gain = 2;
		b43_radio_set(dev, 0x082, 0x20);
		break;
	case B43_SENSE_VBAT:
		b43_phy_set(dev, 0x4d7, 0x8);
		b43_phy_maskset(dev, 0x4d7, ~0x7000, 0x3000);
		auxpga_vmidcourse = 7;
		auxpga_vmidfine = 0xa;
		auxpga_gain = 2;
		break;
	}
	auxpga_vmid = (0x200 | (auxpga_vmidcourse << 4) | auxpga_vmidfine);

	b43_phy_set(dev, 0x4d8, 0x1);
	b43_phy_maskset(dev, 0x4d8, ~(0x3ff << 2), auxpga_vmid << 2);
	b43_phy_set(dev, 0x4d8, 0x2);
	b43_phy_maskset(dev, 0x4d8, ~(0x7 << 12), auxpga_gain << 12);
	b43_phy_set(dev, 0x4d0, 0x20);

	b43_radio_write(dev, 0x112, 0x6);

	/* a dummy TX triggers the actual measurement */
	b43_dummy_transmission(dev, true, false);
	/* Wait if not done */
	if (!(b43_phy_read(dev, 0x476) & 0x8000))
		udelay(10);

	/* Restore */
	for (i = 0; i < 6; i++)
		b43_radio_write(dev, save_radio_regs[i][0],
				save_radio_regs[i][1]);
	for (i = 0; i < 14; i++)
		b43_phy_write(dev, save_phy_regs[i][0], save_phy_regs[i][1]);
	/* TODO: wlc_lcnphy_set_tx_pwr_by_index(tx_pwr_idx) */
	b43_radio_write(dev, 0x4a4, save_radio_4a4);

	b43_mac_enable(dev);

	msleep(1);
}

/* Program the 16-tap TX IIR filter used for CCK rates.
 * Returns false when @filter_type is not in the table. */
static bool b43_phy_lcn_load_tx_iir_cck_filter(struct b43_wldev *dev,
					       u8 filter_type)
{
	int i, j;
	u16 phy_regs[] = { 0x910, 0x91e, 0x91f, 0x924, 0x925, 0x926, 0x920,
			   0x921, 0x927, 0x928, 0x929, 0x922, 0x923, 0x930,
			   0x931, 0x932 };
	/* Table is from brcmsmac, values for type 25 were outdated, probably
	 * others need updating too */
	struct lcn_tx_iir_filter tx_iir_filters_cck[] = {
		{ 0, { 1, 415, 1874, 64, 128, 64, 792, 1656, 64, 128, 64, 778,
		       1582, 64, 128, 64 } },
		{ 1, { 1, 402, 1847, 259, 59, 259, 671, 1794, 68, 54, 68, 608,
		       1863, 93, 167, 93 } },
		{ 2, { 1, 415, 1874, 64, 128, 64, 792, 1656, 192, 384, 192,
		       778, 1582, 64, 128, 64 } },
		{ 3, { 1, 302, 1841, 129, 258, 129, 658, 1720, 205, 410, 205,
		       754, 1760, 170, 340, 170 } },
		{ 20, { 1, 360, 1884, 242, 1734, 242, 752, 1720, 205, 1845,
			205, 767, 1760, 256, 185, 256 } },
		{ 21, { 1, 360, 1884, 149, 1874, 149, 752, 1720, 205, 1883,
			205, 767, 1760, 256, 273, 256 } },
		{ 22, { 1, 360, 1884, 98, 1948, 98, 752, 1720, 205, 1924, 205,
			767, 1760, 256, 352, 256 } },
		{ 23, { 1, 350, 1884, 116, 1966, 116, 752, 1720, 205, 2008,
			205, 767, 1760, 128, 233, 128 } },
		{ 24, { 1, 325, 1884, 32, 40, 32, 756, 1720, 256, 471, 256,
			766, 1760, 256, 1881, 256 } },
		{ 25, { 1, 299, 1884, 51, 64, 51, 736, 1720, 256, 471, 256,
			765, 1760, 262, 1878, 262 } },
		/* brcmsmac version { 25, { 1, 299, 1884, 51, 64, 51, 736,
		 * 1720, 256, 471, 256, 765, 1760, 256, 1881, 256 } }, */
		{ 26, { 1, 277, 1943, 39, 117, 88, 637, 1838, 64, 192, 144,
			614, 1864, 128, 384, 288 } },
		{ 27, { 1, 245, 1943, 49, 147, 110, 626, 1838, 256, 768, 576,
			613, 1864, 128, 384, 288 } },
		{ 30, { 1, 302, 1841, 61, 122, 61, 658, 1720, 205, 410, 205,
			754, 1760, 170, 340, 170 } },
	};

	for (i = 0; i < ARRAY_SIZE(tx_iir_filters_cck); i++) {
		if (tx_iir_filters_cck[i].type == filter_type) {
			for (j = 0; j < 16; j++)
				b43_phy_write(dev, phy_regs[j],
					      tx_iir_filters_cck[i].values[j]);
			return true;
		}
	}

	return false;
}

/* Program the 16-tap TX IIR filter used for OFDM rates.
 * Returns false when @filter_type is not in the table. */
static bool b43_phy_lcn_load_tx_iir_ofdm_filter(struct b43_wldev *dev,
						u8 filter_type)
{
	int i, j;
	u16 phy_regs[] = { 0x90f, 0x900, 0x901, 0x906, 0x907, 0x908, 0x902,
			   0x903, 0x909, 0x90a, 0x90b, 0x904, 0x905, 0x90c,
			   0x90d, 0x90e };
	struct lcn_tx_iir_filter tx_iir_filters_ofdm[] = {
		{ 0,
		  { 0, 0xa2, 0x0, 0x100, 0x100, 0x0, 0x0, 0x0, 0x100, 0x0,
		    0x0, 0x278, 0xfea0, 0x80, 0x100, 0x80 } },
		{ 1, { 0, 374, 0xFF79, 16, 32, 16, 799, 0xFE74, 50, 32, 50,
		       750, 0xFE2B, 212, 0xFFCE, 212 } },
		{ 2, { 0, 375, 0xFF16, 37, 76, 37, 799, 0xFE74, 32, 20, 32,
		       748, 0xFEF2, 128, 0xFFE2, 128 } },
	};

	for (i = 0; i < ARRAY_SIZE(tx_iir_filters_ofdm); i++) {
		if (tx_iir_filters_ofdm[i].type == filter_type) {
			for (j = 0; j < 16; j++)
				b43_phy_write(dev, phy_regs[j],
					      tx_iir_filters_ofdm[i].values[j]);
			return true;
		}
	}
	return false;
}

/* wlc_lcnphy_set_tx_gain_override */
/* Enable/disable manual override of the hardware TX gain control. */
static void b43_phy_lcn_set_tx_gain_override(struct b43_wldev *dev, bool enable)
{
	b43_phy_maskset(dev, 0x4b0, ~(0x1 << 7), enable << 7);
	b43_phy_maskset(dev, 0x4b0, ~(0x1 << 14), enable << 14);
	b43_phy_maskset(dev, 0x43b, ~(0x1 << 6), enable << 6);
}

/* wlc_lcnphy_set_tx_gain */
/* Write the full TX gain chain (gm/PGA/PAD/DAC) and turn the override on. */
static void b43_phy_lcn_set_tx_gain(struct b43_wldev *dev,
				    struct lcn_tx_gains *target_gains)
{
	u16 pa_gain = b43_phy_lcn_get_pa_gain(dev);

	b43_phy_write(dev, 0x4b5,
		      (target_gains->gm_gain | (target_gains->pga_gain << 8)));
	b43_phy_maskset(dev, 0x4fb, ~0x7fff,
			(target_gains->pad_gain | (pa_gain << 8)));
	b43_phy_write(dev, 0x4fc,
		      (target_gains->gm_gain | (target_gains->pga_gain << 8)));
	b43_phy_maskset(dev, 0x4fd, ~0x7fff,
			(target_gains->pad_gain | (pa_gain << 8)));

	b43_phy_lcn_set_dac_gain(dev, target_gains->dac_gain);
	b43_phy_lcn_set_tx_gain_override(dev, true);
}

/* wlc_lcnphy_tx_pwr_ctrl_init */
/* Initialize TX power control; without hardware power control support,
 * fall back to fixed per-band gains and a fixed baseband multiplier. */
static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
{
	struct lcn_tx_gains tx_gains;
	u8 bbmult;

	b43_mac_suspend(dev);

	if (!dev->phy.lcn->hw_pwr_ctl_capable) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			tx_gains.gm_gain = 4;
			tx_gains.pga_gain = 12;
			tx_gains.pad_gain = 12;
			tx_gains.dac_gain = 0;
			bbmult = 150;
		} else {
			tx_gains.gm_gain = 7;
			tx_gains.pga_gain = 15;
			tx_gains.pad_gain = 14;
			tx_gains.dac_gain = 0;
			bbmult = 150;
		}
		b43_phy_lcn_set_tx_gain(dev, &tx_gains);
		b43_phy_lcn_set_bbmult(dev, bbmult);
		b43_phy_lcn_sense_setup(dev, B43_SENSE_TEMP);
	} else {
		b43err(dev->wl, "TX power control not supported for this HW\n");
	}

	b43_mac_enable(dev);
}

/* wlc_lcnphy_txrx_spur_avoidance_mode */
/* Toggle spur-avoidance mode; also retunes the MAC clock accordingly. */
static void b43_phy_lcn_txrx_spur_avoidance_mode(struct b43_wldev *dev,
						 bool enable)
{
	if (enable) {
		b43_phy_write(dev, 0x942, 0x7);
		b43_phy_write(dev, 0x93b, ((1 << 13) + 23));
		b43_phy_write(dev, 0x93c, ((1 << 13) + 1989));

		b43_phy_write(dev, 0x44a, 0x084);
		b43_phy_write(dev, 0x44a, 0x080);
		b43_phy_write(dev, 0x6d3, 0x2222);
		b43_phy_write(dev, 0x6d3, 0x2220);
	} else {
		b43_phy_write(dev, 0x942, 0x0);
		b43_phy_write(dev, 0x93b, ((0 << 13) + 23));
		b43_phy_write(dev, 0x93c, ((0 << 13) + 1989));
	}
	b43_mac_switch_freq(dev, enable);
}

/**************************************************
 * Channel switching ops.
 **************************************************/

/* wlc_lcnphy_set_chanspec_tweaks */
/* Per-channel PLL and spur-avoidance configuration. */
static void b43_phy_lcn_set_channel_tweaks(struct b43_wldev *dev, int channel)
{
	struct bcma_drv_cc *cc = &dev->dev->bdev->bus->drv_cc;

	b43_phy_maskset(dev, 0x448, ~0x300, (channel == 14) ?
			0x200 : 0x100);

	/* channels whose harmonics need spur avoidance disabled */
	if (channel == 1 || channel == 2 || channel == 3 ||
	    channel == 4 || channel == 9 ||
	    channel == 10 || channel == 11 || channel == 12) {
		bcma_chipco_pll_write(cc, 0x2, 0x03000c04);
		bcma_chipco_pll_maskset(cc, 0x3, 0x00ffffff, 0x0);
		bcma_chipco_pll_write(cc, 0x4, 0x200005c0);

		bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 0x400);

		b43_phy_write(dev, 0x942, 0);

		b43_phy_lcn_txrx_spur_avoidance_mode(dev, false);
		b43_phy_maskset(dev, 0x424, (u16) ~0xff00, 0x1b00);
		b43_phy_write(dev, 0x425, 0x5907);
	} else {
		bcma_chipco_pll_write(cc, 0x2, 0x03140c04);
		bcma_chipco_pll_maskset(cc, 0x3, 0x00ffffff, 0x333333);
		bcma_chipco_pll_write(cc, 0x4, 0x202c2820);

		bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 0x400);

		b43_phy_write(dev, 0x942, 0);

		b43_phy_lcn_txrx_spur_avoidance_mode(dev, true);
		b43_phy_maskset(dev, 0x424, (u16) ~0xff00, 0x1f00);
		b43_phy_write(dev, 0x425, 0x590a);
	}

	b43_phy_set(dev, 0x44a, 0x44);
	b43_phy_write(dev, 0x44a, 0x80);
}

/* wlc_phy_chanspec_set_lcnphy */
/* Full channel switch: tweaks, radio tune, AFE toggle, SFO and filters.
 * @channel->hw_value must be 1..14 (indexes sfo_cfg below). */
static int b43_phy_lcn_set_channel(struct b43_wldev *dev,
				   struct ieee80211_channel *channel,
				   enum nl80211_channel_type channel_type)
{
	/* per-channel SFO (sampling frequency offset) register pairs */
	static const u16 sfo_cfg[14][2] = {
		{965, 1087}, {967, 1085}, {969, 1082}, {971, 1080},
		{973, 1078}, {975, 1076}, {977, 1073}, {979, 1071},
		{981, 1069}, {983, 1067}, {985, 1065}, {987, 1063},
		{989, 1060}, {994, 1055},
	};

	b43_phy_lcn_set_channel_tweaks(dev, channel->hw_value);

	b43_phy_set(dev, 0x44a, 0x44);
	b43_phy_write(dev, 0x44a, 0x80);

	b43_radio_2064_channel_setup(dev);
	mdelay(1);

	b43_phy_lcn_afe_set_unset(dev);

	b43_phy_write(dev, 0x657, sfo_cfg[channel->hw_value - 1][0]);
	b43_phy_write(dev, 0x658, sfo_cfg[channel->hw_value - 1][1]);

	if (channel->hw_value == 14) {
		b43_phy_maskset(dev, 0x448, ~(0x3 << 8), (2) << 8);
		b43_phy_lcn_load_tx_iir_cck_filter(dev, 3);
	} else {
		b43_phy_maskset(dev, 0x448, ~(0x3 << 8), (1) << 8);
		/* brcmsmac uses filter_type 2, we follow wl with 25 */
		b43_phy_lcn_load_tx_iir_cck_filter(dev, 25);
	}
	/* brcmsmac uses filter_type 2, we follow wl with 0 */
	b43_phy_lcn_load_tx_iir_ofdm_filter(dev, 0);

	b43_phy_maskset(dev, 0x4eb, ~(0x7 << 3), 0x1 << 3);

	return 0;
}

/**************************************************
 * Basic PHY ops.
 **************************************************/

/* Allocate the LCN-PHY private data attached to the device. */
static int b43_phy_lcn_op_allocate(struct b43_wldev *dev)
{
	struct b43_phy_lcn *phy_lcn;

	phy_lcn = kzalloc(sizeof(*phy_lcn), GFP_KERNEL);
	if (!phy_lcn)
		return -ENOMEM;
	dev->phy.lcn = phy_lcn;

	return 0;
}

/* Free the LCN-PHY private data. */
static void b43_phy_lcn_op_free(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_lcn *phy_lcn = phy->lcn;

	kfree(phy_lcn);
	phy->lcn = NULL;
}

/* Reset the private data to a clean state before (re)init. */
static void b43_phy_lcn_op_prepare_structs(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_lcn *phy_lcn = phy->lcn;

	memset(phy_lcn, 0, sizeof(*phy_lcn));
}

/* wlc_phy_init_lcnphy */
/* Main PHY initialization entry: baseband, tables, radio and channel. */
static int b43_phy_lcn_op_init(struct b43_wldev *dev)
{
	struct bcma_drv_cc *cc = &dev->dev->bdev->bus->drv_cc;

	b43_phy_set(dev, 0x44a, 0x80);
	b43_phy_mask(dev, 0x44a, 0x7f);
	b43_phy_set(dev, 0x6d1, 0x80);
	b43_phy_write(dev, 0x6d0, 0x7);

	b43_phy_lcn_afe_set_unset(dev);

	b43_phy_write(dev, 0x60a, 0xa0);
	b43_phy_write(dev, 0x46a, 0x19);
	b43_phy_maskset(dev, 0x663, 0xFF00, 0x64);

	b43_phy_lcn_tables_init(dev);

	b43_phy_lcn_rev0_baseband_init(dev);
	b43_phy_lcn_bu_tweaks(dev);

	if (dev->phy.radio_ver == 0x2064)
		b43_radio_2064_init(dev);
	else
		B43_WARN_ON(1);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		b43_phy_lcn_tx_pwr_ctl_init(dev);

	b43_switch_channel(dev, dev->phy.channel);

	bcma_chipco_regctl_maskset(cc, 0, 0xf, 0x9);
	bcma_chipco_chipctl_maskset(cc, 0, 0, 0x03cddddd);

	/* TODO */

	b43_phy_set(dev, 0x448, 0x4000);
	udelay(100);
	b43_phy_mask(dev, 0x448, ~0x4000);

	/* TODO */

	return 0;
}

/* Block or unblock the radio front end (software rfkill). */
static void b43_phy_lcn_op_software_rfkill(struct b43_wldev *dev,
					   bool blocked)
{
	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
		b43err(dev->wl, "MAC not suspended\n");

	if (blocked) {
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL2, ~0x7c00);
		b43_phy_set(dev, B43_PHY_LCN_RF_CTL1, 0x1f00);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL5, ~0x7f00);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL4, ~0x2);
		b43_phy_set(dev, B43_PHY_LCN_RF_CTL3, 0x808);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL7, ~0x8);
		b43_phy_set(dev, B43_PHY_LCN_RF_CTL6, 0x8);
	} else {
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL1, ~0x1f00);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL3, ~0x808);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL6, ~0x8);
	}
}

/* Power the analog front end up or down via the AFE control registers. */
static void b43_phy_lcn_op_switch_analog(struct b43_wldev *dev, bool on)
{
	if (on) {
		b43_phy_mask(dev, B43_PHY_LCN_AFE_CTL1, ~0x7);
	} else {
		b43_phy_set(dev, B43_PHY_LCN_AFE_CTL2, 0x7);
		b43_phy_set(dev, B43_PHY_LCN_AFE_CTL1, 0x7);
	}
}

/* Validate @new_channel (2.4 GHz only, 1..14) and switch to it. */
static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
					 unsigned int new_channel)
{
	struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan;
	enum nl80211_channel_type channel_type =
		cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if ((new_channel < 1) || (new_channel > 14))
			return -EINVAL;
	} else {
		/* 5 GHz is not supported by this PHY */
		return -EINVAL;
	}

	return b43_phy_lcn_set_channel(dev, channel, channel_type);
}

/* Default channel: 1 on 2.4 GHz, 36 otherwise. */
static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
{
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		return 1;
	return 36;
}

/* TX power recalculation is not implemented; report "done". */
static enum b43_txpwr_result
b43_phy_lcn_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi)
{
	return B43_TXPWR_RES_DONE;
}

/* TX power adjustment stub (no-op). */
static void b43_phy_lcn_op_adjust_txpower(struct b43_wldev *dev)
{
}

/**************************************************
 * R/W ops.
 **************************************************/

/* Read-modify-write a PHY register via the indirect MMIO interface. */
static void b43_phy_lcn_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
				   u16 set)
{
	b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
	b43_write16(dev, B43_MMIO_PHY_DATA,
		    (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
}

/* Read a radio register via the RADIO24 indirect interface. */
static u16 b43_phy_lcn_op_radio_read(struct b43_wldev *dev, u16 reg)
{
	/* LCN-PHY needs 0x200 for read access */
	reg |= 0x200;

	b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_RADIO24_DATA);
}

/* Write a radio register via the RADIO24 indirect interface. */
static void b43_phy_lcn_op_radio_write(struct b43_wldev *dev, u16 reg,
				       u16 value)
{
	b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg);
	b43_write16(dev, B43_MMIO_RADIO24_DATA, value);
}

/**************************************************
 * PHY ops struct.
 **************************************************/

const struct b43_phy_operations b43_phyops_lcn = {
	.allocate		= b43_phy_lcn_op_allocate,
	.free			= b43_phy_lcn_op_free,
	.prepare_structs	= b43_phy_lcn_op_prepare_structs,
	.init			= b43_phy_lcn_op_init,
	.phy_maskset		= b43_phy_lcn_op_maskset,
	.radio_read		= b43_phy_lcn_op_radio_read,
	.radio_write		= b43_phy_lcn_op_radio_write,
	.software_rfkill	= b43_phy_lcn_op_software_rfkill,
	.switch_analog		= b43_phy_lcn_op_switch_analog,
	.switch_channel		= b43_phy_lcn_op_switch_channel,
	.get_default_chan	= b43_phy_lcn_op_get_default_chan,
	.recalc_txpower		= b43_phy_lcn_op_recalc_txpower,
	.adjust_txpower		= b43_phy_lcn_op_adjust_txpower,
};
gpl-2.0
LeJay/android_kernel_samsung_I9505G
kernel/events/hw_breakpoint.c
2195
16080
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2007 Alan Stern * Copyright (C) IBM Corporation, 2009 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com> * * Thanks to Ingo Molnar for his many suggestions. * * Authors: Alan Stern <stern@rowland.harvard.edu> * K.Prasad <prasad@linux.vnet.ibm.com> * Frederic Weisbecker <fweisbec@gmail.com> */ /* * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, * using the CPU's debug registers. * This file contains the arch-independent routines. 
*/ #include <linux/irqflags.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/hw_breakpoint.h> /* * Constraints data */ /* Number of pinned cpu breakpoints in a cpu */ static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); /* Number of pinned task breakpoints in a cpu */ static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]); /* Number of non-pinned cpu/task breakpoints in a cpu */ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); static int nr_slots[TYPE_MAX]; /* Keep track of the breakpoints attached to tasks */ static LIST_HEAD(bp_task_head); static int constraints_initialized; /* Gather the number of total pinned and un-pinned bp in a cpuset */ struct bp_busy_slots { unsigned int pinned; unsigned int flexible; }; /* Serialize accesses to the above constraints */ static DEFINE_MUTEX(nr_bp_mutex); __weak int hw_breakpoint_weight(struct perf_event *bp) { return 1; } static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) { if (bp->attr.bp_type & HW_BREAKPOINT_RW) return TYPE_DATA; return TYPE_INST; } /* * Report the maximum number of pinned breakpoints a task * have in this cpu */ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) { int i; unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); for (i = nr_slots[type] - 1; i >= 0; i--) { if (tsk_pinned[i] > 0) return i + 1; } return 0; } /* * Count the number of breakpoints of the same type and same task. * The given event must be not on the list. 
*/ static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) { struct task_struct *tsk = bp->hw.bp_target; struct perf_event *iter; int count = 0; list_for_each_entry(iter, &bp_task_head, hw.bp_list) { if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) count += hw_breakpoint_weight(iter); } return count; } /* * Report the number of pinned/un-pinned breakpoints we have in * a given cpu (cpu > -1) or in all of them (cpu = -1). */ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, enum bp_type_idx type) { int cpu = bp->cpu; struct task_struct *tsk = bp->hw.bp_target; if (cpu >= 0) { slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); if (!tsk) slots->pinned += max_task_bp_pinned(cpu, type); else slots->pinned += task_bp_pinned(bp, type); slots->flexible = per_cpu(nr_bp_flexible[type], cpu); return; } for_each_online_cpu(cpu) { unsigned int nr; nr = per_cpu(nr_cpu_bp_pinned[type], cpu); if (!tsk) nr += max_task_bp_pinned(cpu, type); else nr += task_bp_pinned(bp, type); if (nr > slots->pinned) slots->pinned = nr; nr = per_cpu(nr_bp_flexible[type], cpu); if (nr > slots->flexible) slots->flexible = nr; } } /* * For now, continue to consider flexible as pinned, until we can * ensure no flexible event can ever be scheduled before a pinned event * in a same cpu. 
*/ static void fetch_this_slot(struct bp_busy_slots *slots, int weight) { slots->pinned += weight; } /* * Add a pinned breakpoint for the given task in our constraint table */ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, enum bp_type_idx type, int weight) { unsigned int *tsk_pinned; int old_count = 0; int old_idx = 0; int idx = 0; old_count = task_bp_pinned(bp, type); old_idx = old_count - 1; idx = old_idx + weight; /* tsk_pinned[n] is the number of tasks having n breakpoints */ tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); if (enable) { tsk_pinned[idx]++; if (old_count > 0) tsk_pinned[old_idx]--; } else { tsk_pinned[idx]--; if (old_count > 0) tsk_pinned[old_idx]++; } } /* * Add/remove the given breakpoint in our constraint table */ static void toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight) { int cpu = bp->cpu; struct task_struct *tsk = bp->hw.bp_target; /* Pinned counter cpu profiling */ if (!tsk) { if (enable) per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; else per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; return; } /* Pinned counter task profiling */ if (!enable) list_del(&bp->hw.bp_list); if (cpu >= 0) { toggle_bp_task_slot(bp, cpu, enable, type, weight); } else { for_each_online_cpu(cpu) toggle_bp_task_slot(bp, cpu, enable, type, weight); } if (enable) list_add_tail(&bp->hw.bp_list, &bp_task_head); } /* * Function to perform processor-specific cleanup during unregistration */ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) { /* * A weak stub function here for those archs that don't define * it inside arch/.../kernel/hw_breakpoint.c */ } /* * Contraints to check before allowing this new breakpoint counter: * * == Non-pinned counter == (Considered as pinned for now) * * - If attached to a single cpu, check: * * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM * * -> If there are already 
non-pinned counters in this cpu, it means * there is already a free slot for them. * Otherwise, we check that the maximum number of per task * breakpoints (for this cpu) plus the number of per cpu breakpoint * (for this cpu) doesn't cover every registers. * * - If attached to every cpus, check: * * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM * * -> This is roughly the same, except we check the number of per cpu * bp for every cpu and we keep the max one. Same for the per tasks * breakpoints. * * * == Pinned counter == * * - If attached to a single cpu, check: * * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM * * -> Same checks as before. But now the nr_bp_flexible, if any, must keep * one register at least (or they will never be fed). * * - If attached to every cpus, check: * * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM */ static int __reserve_bp_slot(struct perf_event *bp) { struct bp_busy_slots slots = {0}; enum bp_type_idx type; int weight; /* We couldn't initialize breakpoint constraints on boot */ if (!constraints_initialized) return -ENOMEM; /* Basic checks */ if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || bp->attr.bp_type == HW_BREAKPOINT_INVALID) return -EINVAL; type = find_slot_idx(bp); weight = hw_breakpoint_weight(bp); fetch_bp_busy_slots(&slots, bp, type); /* * Simulate the addition of this breakpoint to the constraints * and see the result. 
*/ fetch_this_slot(&slots, weight); /* Flexible counters need to keep at least one slot */ if (slots.pinned + (!!slots.flexible) > nr_slots[type]) return -ENOSPC; toggle_bp_slot(bp, true, type, weight); return 0; } int reserve_bp_slot(struct perf_event *bp) { int ret; mutex_lock(&nr_bp_mutex); ret = __reserve_bp_slot(bp); mutex_unlock(&nr_bp_mutex); return ret; } static void __release_bp_slot(struct perf_event *bp) { enum bp_type_idx type; int weight; type = find_slot_idx(bp); weight = hw_breakpoint_weight(bp); toggle_bp_slot(bp, false, type, weight); } void release_bp_slot(struct perf_event *bp) { mutex_lock(&nr_bp_mutex); arch_unregister_hw_breakpoint(bp); __release_bp_slot(bp); mutex_unlock(&nr_bp_mutex); } /* * Allow the kernel debugger to reserve breakpoint slots without * taking a lock using the dbg_* variant of for the reserve and * release breakpoint slots. */ int dbg_reserve_bp_slot(struct perf_event *bp) { if (mutex_is_locked(&nr_bp_mutex)) return -1; return __reserve_bp_slot(bp); } int dbg_release_bp_slot(struct perf_event *bp) { if (mutex_is_locked(&nr_bp_mutex)) return -1; __release_bp_slot(bp); return 0; } static int validate_hw_breakpoint(struct perf_event *bp) { int ret; ret = arch_validate_hwbkpt_settings(bp); if (ret) return ret; if (arch_check_bp_in_kernelspace(bp)) { if (bp->attr.exclude_kernel) return -EINVAL; /* * Don't let unprivileged users set a breakpoint in the trap * path to avoid trap recursion attacks. 
*/ if (!capable(CAP_SYS_ADMIN)) return -EPERM; } return 0; } int register_perf_hw_breakpoint(struct perf_event *bp) { int ret; ret = reserve_bp_slot(bp); if (ret) return ret; ret = validate_hw_breakpoint(bp); /* if arch_validate_hwbkpt_settings() fails then release bp slot */ if (ret) release_bp_slot(bp); return ret; } /** * register_user_hw_breakpoint - register a hardware breakpoint for user space * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs */ struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, struct task_struct *tsk) { return perf_event_create_kernel_counter(attr, -1, tsk, triggered, context); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); /** * modify_user_hw_breakpoint - modify a user-space hardware breakpoint * @bp: the breakpoint structure to modify * @attr: new breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs */ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { u64 old_addr = bp->attr.bp_addr; u64 old_len = bp->attr.bp_len; int old_type = bp->attr.bp_type; int err = 0; perf_event_disable(bp); bp->attr.bp_addr = attr->bp_addr; bp->attr.bp_type = attr->bp_type; bp->attr.bp_len = attr->bp_len; if (attr->disabled) goto end; err = validate_hw_breakpoint(bp); if (!err) perf_event_enable(bp); if (err) { bp->attr.bp_addr = old_addr; bp->attr.bp_type = old_type; bp->attr.bp_len = old_len; if (!bp->attr.disabled) perf_event_enable(bp); return err; } end: bp->attr.disabled = attr->disabled; return 0; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); /** * unregister_hw_breakpoint - unregister a user-space hardware breakpoint * @bp: the breakpoint structure to unregister */ void unregister_hw_breakpoint(struct perf_event *bp) 
{ if (!bp) return; perf_event_release_kernel(bp); } EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); /** * register_wide_hw_breakpoint - register a wide breakpoint in the kernel * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * * @return a set of per_cpu pointers to perf events */ struct perf_event * __percpu * register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context) { struct perf_event * __percpu *cpu_events, **pevent, *bp; long err; int cpu; cpu_events = alloc_percpu(typeof(*cpu_events)); if (!cpu_events) return (void __percpu __force *)ERR_PTR(-ENOMEM); get_online_cpus(); for_each_online_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered, context); *pevent = bp; if (IS_ERR(bp)) { err = PTR_ERR(bp); goto fail; } } put_online_cpus(); return cpu_events; fail: for_each_online_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); if (IS_ERR(*pevent)) break; unregister_hw_breakpoint(*pevent); } put_online_cpus(); free_percpu(cpu_events); return (void __percpu __force *)ERR_PTR(err); } EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); /** * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel * @cpu_events: the per cpu set of events to unregister */ void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { int cpu; struct perf_event **pevent; for_each_possible_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); unregister_hw_breakpoint(*pevent); } free_percpu(cpu_events); } EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); static struct notifier_block hw_breakpoint_exceptions_nb = { .notifier_call = hw_breakpoint_exceptions_notify, /* we need to be notified first */ .priority = 0x7fffffff }; static void bp_perf_event_destroy(struct perf_event *event) { release_bp_slot(event); } static int hw_breakpoint_event_init(struct perf_event *bp) { int err; if (bp->attr.type != 
PERF_TYPE_BREAKPOINT) return -ENOENT; /* * no branch sampling for breakpoint events */ if (has_branch_stack(bp)) return -EOPNOTSUPP; err = register_perf_hw_breakpoint(bp); if (err) return err; bp->destroy = bp_perf_event_destroy; return 0; } static int hw_breakpoint_add(struct perf_event *bp, int flags) { if (!(flags & PERF_EF_START)) bp->hw.state = PERF_HES_STOPPED; return arch_install_hw_breakpoint(bp); } static void hw_breakpoint_del(struct perf_event *bp, int flags) { arch_uninstall_hw_breakpoint(bp); } static void hw_breakpoint_start(struct perf_event *bp, int flags) { bp->hw.state = 0; } static void hw_breakpoint_stop(struct perf_event *bp, int flags) { bp->hw.state = PERF_HES_STOPPED; } static int hw_breakpoint_event_idx(struct perf_event *bp) { return 0; } static struct pmu perf_breakpoint = { .task_ctx_nr = perf_sw_context, /* could eventually get its own */ .event_init = hw_breakpoint_event_init, .add = hw_breakpoint_add, .del = hw_breakpoint_del, .start = hw_breakpoint_start, .stop = hw_breakpoint_stop, .read = hw_breakpoint_pmu_read, .event_idx = hw_breakpoint_event_idx, }; int __init init_hw_breakpoint(void) { unsigned int **task_bp_pinned; int cpu, err_cpu; int i; for (i = 0; i < TYPE_MAX; i++) nr_slots[i] = hw_breakpoint_slots(i); for_each_possible_cpu(cpu) { for (i = 0; i < TYPE_MAX; i++) { task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu); *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i], GFP_KERNEL); if (!*task_bp_pinned) goto err_alloc; } } constraints_initialized = 1; perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT); return register_die_notifier(&hw_breakpoint_exceptions_nb); err_alloc: for_each_possible_cpu(err_cpu) { for (i = 0; i < TYPE_MAX; i++) kfree(per_cpu(nr_task_bp_pinned[i], cpu)); if (err_cpu == cpu) break; } return -ENOMEM; }
gpl-2.0
somcom3x/kernel_samsung_msm8660-common
arch/powerpc/sysdev/xics/icp-hv.c
2707
3623
/* * Copyright 2011 IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> #include <asm/smp.h> #include <asm/irq.h> #include <asm/errno.h> #include <asm/xics.h> #include <asm/io.h> #include <asm/hvcall.h> static inline unsigned int icp_hv_get_xirr(unsigned char cppr) { unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; rc = plpar_hcall(H_XIRR, retbuf, cppr); if (rc != H_SUCCESS) panic(" bad return code xirr - rc = %lx\n", rc); return (unsigned int)retbuf[0]; } static inline void icp_hv_set_xirr(unsigned int value) { long rc = plpar_hcall_norets(H_EOI, value); if (rc != H_SUCCESS) panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); } static inline void icp_hv_set_cppr(u8 value) { long rc = plpar_hcall_norets(H_CPPR, value); if (rc != H_SUCCESS) panic("bad return code cppr - rc = %lx\n", rc); } static inline void icp_hv_set_qirr(int n_cpu , u8 value) { long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), value); if (rc != H_SUCCESS) panic("bad return code qirr - rc = %lx\n", rc); } static void icp_hv_eoi(struct irq_data *d) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); iosync(); icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq); } static void icp_hv_teardown_cpu(void) { int cpu = smp_processor_id(); /* Clear any pending IPI */ icp_hv_set_qirr(cpu, 0xff); } static void icp_hv_flush_ipi(void) { /* We take the ipi irq but and never return so we * need to EOI the IPI, but want to leave our priority 0 * * should we check all the other interrupts too? * should we be flagging idle loop instead? 
* or creating some task to be scheduled? */ icp_hv_set_xirr((0x00 << 24) | XICS_IPI); } static unsigned int icp_hv_get_irq(void) { unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); unsigned int vec = xirr & 0x00ffffff; unsigned int irq; if (vec == XICS_IRQ_SPURIOUS) return NO_IRQ; irq = irq_radix_revmap_lookup(xics_host, vec); if (likely(irq != NO_IRQ)) { xics_push_cppr(vec); return irq; } /* We don't have a linux mapping, so have rtas mask it. */ xics_mask_unknown_vec(vec); /* We might learn about it later, so EOI it */ icp_hv_set_xirr(xirr); return NO_IRQ; } static void icp_hv_set_cpu_priority(unsigned char cppr) { xics_set_base_cppr(cppr); icp_hv_set_cppr(cppr); iosync(); } #ifdef CONFIG_SMP static void icp_hv_cause_ipi(int cpu, unsigned long data) { icp_hv_set_qirr(cpu, IPI_PRIORITY); } static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) { int cpu = smp_processor_id(); icp_hv_set_qirr(cpu, 0xff); return smp_ipi_demux(); } #endif /* CONFIG_SMP */ static const struct icp_ops icp_hv_ops = { .get_irq = icp_hv_get_irq, .eoi = icp_hv_eoi, .set_priority = icp_hv_set_cpu_priority, .teardown_cpu = icp_hv_teardown_cpu, .flush_ipi = icp_hv_flush_ipi, #ifdef CONFIG_SMP .ipi_action = icp_hv_ipi_action, .cause_ipi = icp_hv_cause_ipi, #endif }; int icp_hv_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); if (!np) np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); if (!np) return -ENODEV; icp_ops = &icp_hv_ops; return 0; }
gpl-2.0
tadeas482/android_kernel_u8500
arch/powerpc/platforms/512x/mpc512x_shared.c
2963
11799
/* * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved. * * Author: John Rigby <jrigby@freescale.com> * * Description: * MPC512x Shared code * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/of_platform.h> #include <linux/fsl-diu-fb.h> #include <linux/bootmem.h> #include <sysdev/fsl_soc.h> #include <asm/cacheflush.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/mpc5121.h> #include <asm/mpc52xx_psc.h> #include "mpc512x.h" static struct mpc512x_reset_module __iomem *reset_module_base; static void __init mpc512x_restart_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); if (!np) return; reset_module_base = of_iomap(np, 0); of_node_put(np); } void mpc512x_restart(char *cmd) { if (reset_module_base) { /* Enable software reset "RSTE" */ out_be32(&reset_module_base->rpr, 0x52535445); /* Set software hard reset */ out_be32(&reset_module_base->rcr, 0x2); } else { pr_err("Restart module not mapped.\n"); } for (;;) ; } struct fsl_diu_shared_fb { u8 gamma[0x300]; /* 32-bit aligned! */ struct diu_ad ad0; /* 32-bit aligned! 
*/ phys_addr_t fb_phys; size_t fb_len; bool in_use; }; unsigned int mpc512x_get_pixel_format(unsigned int bits_per_pixel, int monitor_port) { switch (bits_per_pixel) { case 32: return 0x88883316; case 24: return 0x88082219; case 16: return 0x65053118; } return 0x00000400; } void mpc512x_set_gamma_table(int monitor_port, char *gamma_table_base) { } void mpc512x_set_monitor_port(int monitor_port) { } #define DIU_DIV_MASK 0x000000ff void mpc512x_set_pixel_clock(unsigned int pixclock) { unsigned long bestval, bestfreq, speed, busfreq; unsigned long minpixclock, maxpixclock, pixval; struct mpc512x_ccm __iomem *ccm; struct device_node *np; u32 temp; long err; int i; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock"); if (!np) { pr_err("Can't find clock control module.\n"); return; } ccm = of_iomap(np, 0); of_node_put(np); if (!ccm) { pr_err("Can't map clock control module reg.\n"); return; } np = of_find_node_by_type(NULL, "cpu"); if (np) { const unsigned int *prop = of_get_property(np, "bus-frequency", NULL); of_node_put(np); if (prop) { busfreq = *prop; } else { pr_err("Can't get bus-frequency property\n"); return; } } else { pr_err("Can't find 'cpu' node.\n"); return; } /* Pixel Clock configuration */ pr_debug("DIU: Bus Frequency = %lu\n", busfreq); speed = busfreq * 4; /* DIU_DIV ratio is 4 * CSB_CLK / DIU_CLK */ /* Calculate the pixel clock with the smallest error */ /* calculate the following in steps to avoid overflow */ pr_debug("DIU pixclock in ps - %d\n", pixclock); temp = (1000000000 / pixclock) * 1000; pixclock = temp; pr_debug("DIU pixclock freq - %u\n", pixclock); temp = temp / 20; /* pixclock * 0.05 */ pr_debug("deviation = %d\n", temp); minpixclock = pixclock - temp; maxpixclock = pixclock + temp; pr_debug("DIU minpixclock - %lu\n", minpixclock); pr_debug("DIU maxpixclock - %lu\n", maxpixclock); pixval = speed/pixclock; pr_debug("DIU pixval = %lu\n", pixval); err = LONG_MAX; bestval = pixval; pr_debug("DIU bestval = %lu\n", bestval); bestfreq = 
0; for (i = -1; i <= 1; i++) { temp = speed / (pixval+i); pr_debug("DIU test pixval i=%d, pixval=%lu, temp freq. = %u\n", i, pixval, temp); if ((temp < minpixclock) || (temp > maxpixclock)) pr_debug("DIU exceeds monitor range (%lu to %lu)\n", minpixclock, maxpixclock); else if (abs(temp - pixclock) < err) { pr_debug("Entered the else if block %d\n", i); err = abs(temp - pixclock); bestval = pixval + i; bestfreq = temp; } } pr_debug("DIU chose = %lx\n", bestval); pr_debug("DIU error = %ld\n NomPixClk ", err); pr_debug("DIU: Best Freq = %lx\n", bestfreq); /* Modify DIU_DIV in CCM SCFR1 */ temp = in_be32(&ccm->scfr1); pr_debug("DIU: Current value of SCFR1: 0x%08x\n", temp); temp &= ~DIU_DIV_MASK; temp |= (bestval & DIU_DIV_MASK); out_be32(&ccm->scfr1, temp); pr_debug("DIU: Modified value of SCFR1: 0x%08x\n", temp); iounmap(ccm); } ssize_t mpc512x_show_monitor_port(int monitor_port, char *buf) { return sprintf(buf, "0 - 5121 LCD\n"); } int mpc512x_set_sysfs_monitor_port(int val) { return 0; } static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb; #if defined(CONFIG_FB_FSL_DIU) || \ defined(CONFIG_FB_FSL_DIU_MODULE) static inline void mpc512x_free_bootmem(struct page *page) { __ClearPageReserved(page); BUG_ON(PageTail(page)); BUG_ON(atomic_read(&page->_count) > 1); atomic_set(&page->_count, 1); __free_page(page); totalram_pages++; } void mpc512x_release_bootmem(void) { unsigned long addr = diu_shared_fb.fb_phys & PAGE_MASK; unsigned long size = diu_shared_fb.fb_len; unsigned long start, end; if (diu_shared_fb.in_use) { start = PFN_UP(addr); end = PFN_DOWN(addr + size); for (; start < end; start++) mpc512x_free_bootmem(pfn_to_page(start)); diu_shared_fb.in_use = false; } diu_ops.release_bootmem = NULL; } #endif /* * Check if DIU was pre-initialized. If so, perform steps * needed to continue displaying through the whole boot process. * Move area descriptor and gamma table elsewhere, they are * destroyed by bootmem allocator otherwise. 
The frame buffer * address range will be reserved in setup_arch() after bootmem * allocator is up. */ void __init mpc512x_init_diu(void) { struct device_node *np; struct diu __iomem *diu_reg; phys_addr_t desc; void __iomem *vaddr; unsigned long mode, pix_fmt, res, bpp; unsigned long dst; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-diu"); if (!np) { pr_err("No DIU node\n"); return; } diu_reg = of_iomap(np, 0); of_node_put(np); if (!diu_reg) { pr_err("Can't map DIU\n"); return; } mode = in_be32(&diu_reg->diu_mode); if (mode != MFB_MODE1) { pr_info("%s: DIU OFF\n", __func__); goto out; } desc = in_be32(&diu_reg->desc[0]); vaddr = ioremap(desc, sizeof(struct diu_ad)); if (!vaddr) { pr_err("Can't map DIU area desc.\n"); goto out; } memcpy(&diu_shared_fb.ad0, vaddr, sizeof(struct diu_ad)); /* flush fb area descriptor */ dst = (unsigned long)&diu_shared_fb.ad0; flush_dcache_range(dst, dst + sizeof(struct diu_ad) - 1); res = in_be32(&diu_reg->disp_size); pix_fmt = in_le32(vaddr); bpp = ((pix_fmt >> 16) & 0x3) + 1; diu_shared_fb.fb_phys = in_le32(vaddr + 4); diu_shared_fb.fb_len = ((res & 0xfff0000) >> 16) * (res & 0xfff) * bpp; diu_shared_fb.in_use = true; iounmap(vaddr); desc = in_be32(&diu_reg->gamma); vaddr = ioremap(desc, sizeof(diu_shared_fb.gamma)); if (!vaddr) { pr_err("Can't map DIU area desc.\n"); diu_shared_fb.in_use = false; goto out; } memcpy(&diu_shared_fb.gamma, vaddr, sizeof(diu_shared_fb.gamma)); /* flush gamma table */ dst = (unsigned long)&diu_shared_fb.gamma; flush_dcache_range(dst, dst + sizeof(diu_shared_fb.gamma) - 1); iounmap(vaddr); out_be32(&diu_reg->gamma, virt_to_phys(&diu_shared_fb.gamma)); out_be32(&diu_reg->desc[1], 0); out_be32(&diu_reg->desc[2], 0); out_be32(&diu_reg->desc[0], virt_to_phys(&diu_shared_fb.ad0)); out: iounmap(diu_reg); } void __init mpc512x_setup_diu(void) { int ret; /* * We do not allocate and configure new area for bitmap buffer * because it would requere copying bitmap data (splash image) * and so negatively 
affect boot time. Instead we reserve the * already configured frame buffer area so that it won't be * destroyed. The starting address of the area to reserve and * also it's length is passed to reserve_bootmem(). It will be * freed later on first open of fbdev, when splash image is not * needed any more. */ if (diu_shared_fb.in_use) { ret = reserve_bootmem(diu_shared_fb.fb_phys, diu_shared_fb.fb_len, BOOTMEM_EXCLUSIVE); if (ret) { pr_err("%s: reserve bootmem failed\n", __func__); diu_shared_fb.in_use = false; } } #if defined(CONFIG_FB_FSL_DIU) || \ defined(CONFIG_FB_FSL_DIU_MODULE) diu_ops.get_pixel_format = mpc512x_get_pixel_format; diu_ops.set_gamma_table = mpc512x_set_gamma_table; diu_ops.set_monitor_port = mpc512x_set_monitor_port; diu_ops.set_pixel_clock = mpc512x_set_pixel_clock; diu_ops.show_monitor_port = mpc512x_show_monitor_port; diu_ops.set_sysfs_monitor_port = mpc512x_set_sysfs_monitor_port; diu_ops.release_bootmem = mpc512x_release_bootmem; #endif } void __init mpc512x_init_IRQ(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-ipic"); if (!np) return; ipic_init(np, 0); of_node_put(np); /* * Initialize the default interrupt mapping priorities, * in case the boot rom changed something on us. 
*/ ipic_set_default_priority(); } /* * Nodes to do bus probe on, soc and localbus */ static struct of_device_id __initdata of_bus_ids[] = { { .compatible = "fsl,mpc5121-immr", }, { .compatible = "fsl,mpc5121-localbus", }, {}, }; void __init mpc512x_declare_of_platform_devices(void) { struct device_node *np; if (of_platform_bus_probe(NULL, of_bus_ids, NULL)) printk(KERN_ERR __FILE__ ": " "Error while probing of_platform bus\n"); np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-nfc"); if (np) { of_platform_device_create(np, NULL, NULL); of_node_put(np); } } #define DEFAULT_FIFO_SIZE 16 static unsigned int __init get_fifo_size(struct device_node *np, char *prop_name) { const unsigned int *fp; fp = of_get_property(np, prop_name, NULL); if (fp) return *fp; pr_warning("no %s property in %s node, defaulting to %d\n", prop_name, np->full_name, DEFAULT_FIFO_SIZE); return DEFAULT_FIFO_SIZE; } #define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \ ((u32)(_base) + sizeof(struct mpc52xx_psc))) /* Init PSC FIFO space for TX and RX slices */ void __init mpc512x_psc_fifo_init(void) { struct device_node *np; void __iomem *psc; unsigned int tx_fifo_size; unsigned int rx_fifo_size; int fifobase = 0; /* current fifo address in 32 bit words */ for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") { tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size"); rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size"); /* size in register is in 4 byte units */ tx_fifo_size /= 4; rx_fifo_size /= 4; if (!tx_fifo_size) tx_fifo_size = 1; if (!rx_fifo_size) rx_fifo_size = 1; psc = of_iomap(np, 0); if (!psc) { pr_err("%s: Can't map %s device\n", __func__, np->full_name); continue; } /* FIFO space is 4KiB, check if requested size is available */ if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) { pr_err("%s: no fifo space available for %s\n", __func__, np->full_name); iounmap(psc); /* * chances are that another device requests less * fifo space, so we continue. 
*/ continue; } /* set tx and rx fifo size registers */ out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size); fifobase += tx_fifo_size; out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size); fifobase += rx_fifo_size; /* reset and enable the slices */ out_be32(&FIFOC(psc)->txcmd, 0x80); out_be32(&FIFOC(psc)->txcmd, 0x01); out_be32(&FIFOC(psc)->rxcmd, 0x80); out_be32(&FIFOC(psc)->rxcmd, 0x01); iounmap(psc); } } void __init mpc512x_init(void) { mpc512x_declare_of_platform_devices(); mpc5121_clk_init(); mpc512x_restart_init(); mpc512x_psc_fifo_init(); }
gpl-2.0
poparteu/linux-kernel-hal
drivers/media/tuners/tua9001.c
3731
6554
/* * Infineon TUA 9001 silicon tuner driver * * Copyright (C) 2009 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "tua9001.h" #include "tua9001_priv.h" /* write register */ static int tua9001_wr_reg(struct tua9001_priv *priv, u8 reg, u16 val) { int ret; u8 buf[3] = { reg, (val >> 8) & 0xff, (val >> 0) & 0xff }; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_addr, .flags = 0, .len = sizeof(buf), .buf = buf, } }; ret = i2c_transfer(priv->i2c, msg, 1); if (ret == 1) { ret = 0; } else { dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x\n", KBUILD_MODNAME, ret, reg); ret = -EREMOTEIO; } return ret; } static int tua9001_release(struct dvb_frontend *fe) { struct tua9001_priv *priv = fe->tuner_priv; int ret = 0; dev_dbg(&priv->i2c->dev, "%s:\n", __func__); if (fe->callback) ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_CEN, 0); kfree(fe->tuner_priv); fe->tuner_priv = NULL; return ret; } static int tua9001_init(struct dvb_frontend *fe) { struct tua9001_priv *priv = fe->tuner_priv; int ret = 0; u8 i; struct reg_val data[] = { { 0x1e, 0x6512 }, { 0x25, 0xb888 }, { 0x39, 0x5460 }, { 0x3b, 0x00c0 }, { 0x3a, 0xf000 }, { 0x08, 0x0000 }, { 0x32, 0x0030 }, { 0x41, 0x703a }, { 0x40, 0x1c78 }, { 0x2c, 0x1c00 }, { 0x36, 0xc013 }, { 0x37, 0x6f18 }, { 0x27, 
0x0008 }, { 0x2a, 0x0001 }, { 0x34, 0x0a40 }, }; dev_dbg(&priv->i2c->dev, "%s:\n", __func__); if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_RESETN, 0); if (ret < 0) goto err; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c-gate */ for (i = 0; i < ARRAY_SIZE(data); i++) { ret = tua9001_wr_reg(priv, data[i].reg, data[i].val); if (ret < 0) goto err_i2c_gate_ctrl; } err_i2c_gate_ctrl: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c-gate */ err: if (ret < 0) dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int tua9001_sleep(struct dvb_frontend *fe) { struct tua9001_priv *priv = fe->tuner_priv; int ret = 0; dev_dbg(&priv->i2c->dev, "%s:\n", __func__); if (fe->callback) ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_RESETN, 1); if (ret < 0) dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int tua9001_set_params(struct dvb_frontend *fe) { struct tua9001_priv *priv = fe->tuner_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret = 0, i; u16 val; u32 frequency; struct reg_val data[2]; dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d frequency=%d " \ "bandwidth_hz=%d\n", __func__, c->delivery_system, c->frequency, c->bandwidth_hz); switch (c->delivery_system) { case SYS_DVBT: switch (c->bandwidth_hz) { case 8000000: val = 0x0000; break; case 7000000: val = 0x1000; break; case 6000000: val = 0x2000; break; case 5000000: val = 0x3000; break; default: ret = -EINVAL; goto err; } break; default: ret = -EINVAL; goto err; } data[0].reg = 0x04; data[0].val = val; frequency = (c->frequency - 150000000); frequency /= 100; frequency *= 48; frequency /= 10000; data[1].reg = 0x1f; data[1].val = frequency; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c-gate */ if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_RXEN, 0); if (ret < 
0) goto err_i2c_gate_ctrl; } for (i = 0; i < ARRAY_SIZE(data); i++) { ret = tua9001_wr_reg(priv, data[i].reg, data[i].val); if (ret < 0) goto err_i2c_gate_ctrl; } if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_RXEN, 1); if (ret < 0) goto err_i2c_gate_ctrl; } err_i2c_gate_ctrl: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c-gate */ err: if (ret < 0) dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static int tua9001_get_if_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tua9001_priv *priv = fe->tuner_priv; dev_dbg(&priv->i2c->dev, "%s:\n", __func__); *frequency = 0; /* Zero-IF */ return 0; } static const struct dvb_tuner_ops tua9001_tuner_ops = { .info = { .name = "Infineon TUA 9001", .frequency_min = 170000000, .frequency_max = 862000000, .frequency_step = 0, }, .release = tua9001_release, .init = tua9001_init, .sleep = tua9001_sleep, .set_params = tua9001_set_params, .get_if_frequency = tua9001_get_if_frequency, }; struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct tua9001_config *cfg) { struct tua9001_priv *priv = NULL; int ret; priv = kzalloc(sizeof(struct tua9001_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->cfg = cfg; priv->i2c = i2c; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_CEN, 1); if (ret < 0) goto err; ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_RXEN, 0); if (ret < 0) goto err; ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, TUA9001_CMD_RESETN, 1); if (ret < 0) goto err; } dev_info(&priv->i2c->dev, "%s: Infineon TUA 9001 successfully attached\n", KBUILD_MODNAME); memcpy(&fe->ops.tuner_ops, &tua9001_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; return fe; err: dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret); kfree(priv); return NULL; } EXPORT_SYMBOL(tua9001_attach); 
MODULE_DESCRIPTION("Infineon TUA 9001 silicon tuner driver"); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_LICENSE("GPL");
gpl-2.0
LorDClockaN/Ace-2.6.35
sound/drivers/opl4/opl4_proc.c
3987
3506
/* * Functions for the OPL4 proc file * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "opl4_local.h" #include <linux/vmalloc.h> #include <sound/info.h> #ifdef CONFIG_PROC_FS static int snd_opl4_mem_proc_open(struct snd_info_entry *entry, unsigned short mode, void **file_private_data) { struct snd_opl4 *opl4 = entry->private_data; mutex_lock(&opl4->access_mutex); if (opl4->memory_access) { mutex_unlock(&opl4->access_mutex); return -EBUSY; } opl4->memory_access++; mutex_unlock(&opl4->access_mutex); return 0; } static int snd_opl4_mem_proc_release(struct snd_info_entry *entry, unsigned short mode, void *file_private_data) { struct snd_opl4 *opl4 = entry->private_data; mutex_lock(&opl4->access_mutex); opl4->memory_access--; mutex_unlock(&opl4->access_mutex); return 0; } static ssize_t snd_opl4_mem_proc_read(struct snd_info_entry *entry, void *file_private_data, struct file *file, char __user *_buf, size_t count, loff_t pos) { struct snd_opl4 *opl4 = entry->private_data; char* buf; buf = vmalloc(count); if (!buf) return -ENOMEM; snd_opl4_read_memory(opl4, buf, pos, count); if (copy_to_user(_buf, buf, count)) { vfree(buf); return -EFAULT; } vfree(buf); return count; } static ssize_t snd_opl4_mem_proc_write(struct snd_info_entry *entry, void *file_private_data, 
struct file *file, const char __user *_buf, size_t count, loff_t pos) { struct snd_opl4 *opl4 = entry->private_data; char *buf; buf = vmalloc(count); if (!buf) return -ENOMEM; if (copy_from_user(buf, _buf, count)) { vfree(buf); return -EFAULT; } snd_opl4_write_memory(opl4, buf, pos, count); vfree(buf); return count; } static struct snd_info_entry_ops snd_opl4_mem_proc_ops = { .open = snd_opl4_mem_proc_open, .release = snd_opl4_mem_proc_release, .read = snd_opl4_mem_proc_read, .write = snd_opl4_mem_proc_write, }; int snd_opl4_create_proc(struct snd_opl4 *opl4) { struct snd_info_entry *entry; entry = snd_info_create_card_entry(opl4->card, "opl4-mem", opl4->card->proc_root); if (entry) { if (opl4->hardware < OPL3_HW_OPL4_ML) { /* OPL4 can access 4 MB external ROM/SRAM */ entry->mode |= S_IWUSR; entry->size = 4 * 1024 * 1024; } else { /* OPL4-ML has 1 MB internal ROM */ entry->size = 1 * 1024 * 1024; } entry->content = SNDRV_INFO_CONTENT_DATA; entry->c.ops = &snd_opl4_mem_proc_ops; entry->module = THIS_MODULE; entry->private_data = opl4; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } opl4->proc_entry = entry; return 0; } void snd_opl4_free_proc(struct snd_opl4 *opl4) { snd_info_free_entry(opl4->proc_entry); } #endif /* CONFIG_PROC_FS */
gpl-2.0
RolanDroid/lge_kernel_lproj
arch/arm/mach-msm/board-mahimahi-tpa2018d1.c
4499
8209
/* drivers/i2c/chips/tpa2018d1.c * * TI TPA2018D1 Speaker Amplifier * * Copyright (C) 2009 HTC Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* TODO: content validation in TPA2018_SET_CONFIG */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/i2c.h> #include <linux/miscdevice.h> #include <linux/gpio.h> #include <asm/uaccess.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/tpa2018d1.h> #include "board-mahimahi-tpa2018d1.h" static struct i2c_client *this_client; static struct tpa2018d1_platform_data *pdata; static int is_on; static char spk_amp_cfg[8]; static const char spk_amp_on[8] = { /* same length as spk_amp_cfg */ 0x01, 0xc3, 0x20, 0x01, 0x00, 0x08, 0x1a, 0x21 }; static const char spk_amp_off[] = {0x01, 0xa2}; static DEFINE_MUTEX(spk_amp_lock); static int tpa2018d1_opened; static char *config_data; static int tpa2018d1_num_modes; #define DEBUG 0 static int tpa2018_i2c_write(const char *txData, int length) { struct i2c_msg msg[] = { { .addr = this_client->addr, .flags = 0, .len = length, .buf = txData, }, }; if (i2c_transfer(this_client->adapter, msg, 1) < 0) { pr_err("%s: I2C transfer error\n", __func__); return -EIO; } else return 0; } static int tpa2018_i2c_read(char *rxData, int length) { struct i2c_msg msgs[] = { { .addr = this_client->addr, .flags = I2C_M_RD, .len = length, .buf = rxData, }, }; if (i2c_transfer(this_client->adapter, msgs, 1) < 0) { pr_err("%s: I2C transfer error\n", __func__); return -EIO; } #if DEBUG do { int i = 0; for 
(i = 0; i < length; i++) pr_info("%s: rx[%d] = %2x\n", __func__, i, rxData[i]); } while(0); #endif return 0; } static int tpa2018d1_open(struct inode *inode, struct file *file) { int rc = 0; mutex_lock(&spk_amp_lock); if (tpa2018d1_opened) { pr_err("%s: busy\n", __func__); rc = -EBUSY; goto done; } tpa2018d1_opened = 1; done: mutex_unlock(&spk_amp_lock); return rc; } static int tpa2018d1_release(struct inode *inode, struct file *file) { mutex_lock(&spk_amp_lock); tpa2018d1_opened = 0; mutex_unlock(&spk_amp_lock); return 0; } static int tpa2018d1_read_config(void __user *argp) { int rc = 0; unsigned char reg_idx = 0x01; unsigned char tmp[7]; if (!is_on) { gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); msleep(5); /* According to TPA2018D1 Spec */ } rc = tpa2018_i2c_write(&reg_idx, sizeof(reg_idx)); if (rc < 0) goto err; rc = tpa2018_i2c_read(tmp, sizeof(tmp)); if (rc < 0) goto err; if (copy_to_user(argp, &tmp, sizeof(tmp))) rc = -EFAULT; err: if (!is_on) gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); return rc; } static int tpa2018d1_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int rc = 0; int mode = -1; int offset = 0; struct tpa2018d1_config_data cfg; mutex_lock(&spk_amp_lock); switch (cmd) { case TPA2018_SET_CONFIG: if (copy_from_user(spk_amp_cfg, argp, sizeof(spk_amp_cfg))) rc = -EFAULT; break; case TPA2018_READ_CONFIG: rc = tpa2018d1_read_config(argp); break; case TPA2018_SET_MODE: if (copy_from_user(&mode, argp, sizeof(mode))) { rc = -EFAULT; break; } if (mode >= tpa2018d1_num_modes || mode < 0) { pr_err("%s: unsupported tpa2018d1 mode %d\n", __func__, mode); rc = -EINVAL; break; } if (!config_data) { pr_err("%s: no config data!\n", __func__); rc = -EIO; break; } memcpy(spk_amp_cfg, config_data + mode * TPA2018D1_CMD_LEN, TPA2018D1_CMD_LEN); break; case TPA2018_SET_PARAM: if (copy_from_user(&cfg, argp, sizeof(cfg))) { pr_err("%s: copy from user failed.\n", __func__); rc = -EFAULT; 
break; } tpa2018d1_num_modes = cfg.mode_num; if (tpa2018d1_num_modes > TPA2018_NUM_MODES) { pr_err("%s: invalid number of modes %d\n", __func__, tpa2018d1_num_modes); rc = -EINVAL; break; } if (cfg.data_len != tpa2018d1_num_modes*TPA2018D1_CMD_LEN) { pr_err("%s: invalid data length %d, expecting %d\n", __func__, cfg.data_len, tpa2018d1_num_modes * TPA2018D1_CMD_LEN); rc = -EINVAL; break; } /* Free the old data */ if (config_data) kfree(config_data); config_data = kmalloc(cfg.data_len, GFP_KERNEL); if (!config_data) { pr_err("%s: out of memory\n", __func__); rc = -ENOMEM; break; } if (copy_from_user(config_data, cfg.cmd_data, cfg.data_len)) { pr_err("%s: copy data from user failed.\n", __func__); kfree(config_data); config_data = NULL; rc = -EFAULT; break; } /* replace default setting with playback setting */ if (tpa2018d1_num_modes >= TPA2018_MODE_PLAYBACK) { offset = TPA2018_MODE_PLAYBACK * TPA2018D1_CMD_LEN; memcpy(spk_amp_cfg, config_data + offset, TPA2018D1_CMD_LEN); } break; default: pr_err("%s: invalid command %d\n", __func__, _IOC_NR(cmd)); rc = -EINVAL; break; } mutex_unlock(&spk_amp_lock); return rc; } static struct file_operations tpa2018d1_fops = { .owner = THIS_MODULE, .open = tpa2018d1_open, .release = tpa2018d1_release, .ioctl = tpa2018d1_ioctl, }; static struct miscdevice tpa2018d1_device = { .minor = MISC_DYNAMIC_MINOR, .name = "tpa2018d1", .fops = &tpa2018d1_fops, }; void tpa2018d1_set_speaker_amp(int on) { if (!pdata) { pr_err("%s: no platform data!\n", __func__); return; } mutex_lock(&spk_amp_lock); if (on && !is_on) { gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); msleep(5); /* According to TPA2018D1 Spec */ if (tpa2018_i2c_write(spk_amp_cfg, sizeof(spk_amp_cfg)) == 0) { is_on = 1; pr_info("%s: ON\n", __func__); } } else if (!on && is_on) { if (tpa2018_i2c_write(spk_amp_off, sizeof(spk_amp_off)) == 0) { is_on = 0; msleep(2); gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); pr_info("%s: OFF\n", __func__); } } mutex_unlock(&spk_amp_lock); } static 
int tpa2018d1_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; pdata = client->dev.platform_data; if (!pdata) { ret = -EINVAL; pr_err("%s: platform data is NULL\n", __func__); goto err_no_pdata; } this_client = client; ret = gpio_request(pdata->gpio_tpa2018_spk_en, "tpa2018"); if (ret < 0) { pr_err("%s: gpio request aud_spk_en pin failed\n", __func__); goto err_free_gpio; } ret = gpio_direction_output(pdata->gpio_tpa2018_spk_en, 1); if (ret < 0) { pr_err("%s: request aud_spk_en gpio direction failed\n", __func__); goto err_free_gpio; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("%s: i2c check functionality error\n", __func__); ret = -ENODEV; goto err_free_gpio; } gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); /* Default Low */ ret = misc_register(&tpa2018d1_device); if (ret) { pr_err("%s: tpa2018d1_device register failed\n", __func__); goto err_free_gpio; } memcpy(spk_amp_cfg, spk_amp_on, sizeof(spk_amp_on)); return 0; err_free_gpio: gpio_free(pdata->gpio_tpa2018_spk_en); err_no_pdata: return ret; } static int tpa2018d1_suspend(struct i2c_client *client, pm_message_t mesg) { return 0; } static int tpa2018d1_resume(struct i2c_client *client) { return 0; } static const struct i2c_device_id tpa2018d1_id[] = { { TPA2018D1_I2C_NAME, 0 }, { } }; static struct i2c_driver tpa2018d1_driver = { .probe = tpa2018d1_probe, .suspend = tpa2018d1_suspend, .resume = tpa2018d1_resume, .id_table = tpa2018d1_id, .driver = { .name = TPA2018D1_I2C_NAME, }, }; static int __init tpa2018d1_init(void) { pr_info("%s\n", __func__); return i2c_add_driver(&tpa2018d1_driver); } module_init(tpa2018d1_init); MODULE_DESCRIPTION("tpa2018d1 speaker amp driver"); MODULE_LICENSE("GPL");
gpl-2.0
mkasick/android_kernel_samsung_jfltespr
drivers/media/video/davinci/vpif.c
5523
11380
/* * vpif - DM646x Video Port Interface driver * VPIF is a receiver and transmitter for video data. It has two channels(0, 1) * that receiveing video byte stream and two channels(2, 3) for video output. * The hardware supports SDTV, HDTV formats, raw data capture. * Currently, the driver supports NTSC and PAL standards. * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed .as is. WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/io.h> #include <mach/hardware.h> #include "vpif.h" MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver"); MODULE_LICENSE("GPL"); #define VPIF_CH0_MAX_MODES (22) #define VPIF_CH1_MAX_MODES (02) #define VPIF_CH2_MAX_MODES (15) #define VPIF_CH3_MAX_MODES (02) static resource_size_t res_len; static struct resource *res; spinlock_t vpif_lock; void __iomem *vpif_base; /** * ch_params: video standard configuration parameters for vpif * The table must include all presets from supported subdevices. 
*/ const struct vpif_channel_config_params ch_params[] = { /* HDTV formats */ { .name = "480p59_94", .width = 720, .height = 480, .frm_fmt = 1, .ycmux_mode = 0, .eav2sav = 138-8, .sav2eav = 720, .l1 = 1, .l3 = 43, .l5 = 523, .vsize = 525, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_480P59_94, }, { .name = "576p50", .width = 720, .height = 576, .frm_fmt = 1, .ycmux_mode = 0, .eav2sav = 144-8, .sav2eav = 720, .l1 = 1, .l3 = 45, .l5 = 621, .vsize = 625, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_576P50, }, { .name = "720p50", .width = 1280, .height = 720, .frm_fmt = 1, .ycmux_mode = 0, .eav2sav = 700-8, .sav2eav = 1280, .l1 = 1, .l3 = 26, .l5 = 746, .vsize = 750, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_720P50, }, { .name = "720p60", .width = 1280, .height = 720, .frm_fmt = 1, .ycmux_mode = 0, .eav2sav = 370 - 8, .sav2eav = 1280, .l1 = 1, .l3 = 26, .l5 = 746, .vsize = 750, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_720P60, }, { .name = "1080I50", .width = 1920, .height = 1080, .frm_fmt = 0, .ycmux_mode = 0, .eav2sav = 720 - 8, .sav2eav = 1920, .l1 = 1, .l3 = 21, .l5 = 561, .l7 = 563, .l9 = 584, .l11 = 1124, .vsize = 1125, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_1080I50, }, { .name = "1080I60", .width = 1920, .height = 1080, .frm_fmt = 0, .ycmux_mode = 0, .eav2sav = 280 - 8, .sav2eav = 1920, .l1 = 1, .l3 = 21, .l5 = 561, .l7 = 563, .l9 = 584, .l11 = 1124, .vsize = 1125, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_1080I60, }, { .name = "1080p60", .width = 1920, .height = 1080, .frm_fmt = 1, .ycmux_mode = 0, .eav2sav = 280 - 8, .sav2eav = 1920, .l1 = 1, .l3 = 42, .l5 = 1122, .vsize = 1125, .capture_format = 0, .vbi_supported = 0, .hd_sd = 1, .dv_preset = V4L2_DV_1080P60, }, /* SDTV formats */ { .name = "NTSC_M", .width = 720, .height = 480, .frm_fmt = 0, .ycmux_mode = 1, .eav2sav = 
268, .sav2eav = 1440, .l1 = 1, .l3 = 23, .l5 = 263, .l7 = 266, .l9 = 286, .l11 = 525, .vsize = 525, .capture_format = 0, .vbi_supported = 1, .hd_sd = 0, .stdid = V4L2_STD_525_60, }, { .name = "PAL_BDGHIK", .width = 720, .height = 576, .frm_fmt = 0, .ycmux_mode = 1, .eav2sav = 280, .sav2eav = 1440, .l1 = 1, .l3 = 23, .l5 = 311, .l7 = 313, .l9 = 336, .l11 = 624, .vsize = 625, .capture_format = 0, .vbi_supported = 1, .hd_sd = 0, .stdid = V4L2_STD_625_50, }, }; const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params); static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val) { if (val) vpif_set_bit(reg, bit); else vpif_clr_bit(reg, bit); } /* This structure is used to keep track of VPIF size register's offsets */ struct vpif_registers { u32 h_cfg, v_cfg_00, v_cfg_01, v_cfg_02, v_cfg, ch_ctrl; u32 line_offset, vanc0_strt, vanc0_size, vanc1_strt; u32 vanc1_size, width_mask, len_mask; u8 max_modes; }; static const struct vpif_registers vpifregs[VPIF_NUM_CHANNELS] = { /* Channel0 */ { VPIF_CH0_H_CFG, VPIF_CH0_V_CFG_00, VPIF_CH0_V_CFG_01, VPIF_CH0_V_CFG_02, VPIF_CH0_V_CFG_03, VPIF_CH0_CTRL, VPIF_CH0_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF, VPIF_CH0_MAX_MODES, }, /* Channel1 */ { VPIF_CH1_H_CFG, VPIF_CH1_V_CFG_00, VPIF_CH1_V_CFG_01, VPIF_CH1_V_CFG_02, VPIF_CH1_V_CFG_03, VPIF_CH1_CTRL, VPIF_CH1_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF, VPIF_CH1_MAX_MODES, }, /* Channel2 */ { VPIF_CH2_H_CFG, VPIF_CH2_V_CFG_00, VPIF_CH2_V_CFG_01, VPIF_CH2_V_CFG_02, VPIF_CH2_V_CFG_03, VPIF_CH2_CTRL, VPIF_CH2_IMG_ADD_OFST, VPIF_CH2_VANC0_STRT, VPIF_CH2_VANC0_SIZE, VPIF_CH2_VANC1_STRT, VPIF_CH2_VANC1_SIZE, 0x7FF, 0x7FF, VPIF_CH2_MAX_MODES }, /* Channel3 */ { VPIF_CH3_H_CFG, VPIF_CH3_V_CFG_00, VPIF_CH3_V_CFG_01, VPIF_CH3_V_CFG_02, VPIF_CH3_V_CFG_03, VPIF_CH3_CTRL, VPIF_CH3_IMG_ADD_OFST, VPIF_CH3_VANC0_STRT, VPIF_CH3_VANC0_SIZE, VPIF_CH3_VANC1_STRT, VPIF_CH3_VANC1_SIZE, 0x7FF, 0x7FF, VPIF_CH3_MAX_MODES }, }; /* vpif_set_mode_info: * This function is used to set horizontal and vertical config 
parameters * As per the standard in the channel, configure the values of L1, L3, * L5, L7 L9, L11 in VPIF Register , also write width and height */ static void vpif_set_mode_info(const struct vpif_channel_config_params *config, u8 channel_id, u8 config_channel_id) { u32 value; value = (config->eav2sav & vpifregs[config_channel_id].width_mask); value <<= VPIF_CH_LEN_SHIFT; value |= (config->sav2eav & vpifregs[config_channel_id].width_mask); regw(value, vpifregs[channel_id].h_cfg); value = (config->l1 & vpifregs[config_channel_id].len_mask); value <<= VPIF_CH_LEN_SHIFT; value |= (config->l3 & vpifregs[config_channel_id].len_mask); regw(value, vpifregs[channel_id].v_cfg_00); value = (config->l5 & vpifregs[config_channel_id].len_mask); value <<= VPIF_CH_LEN_SHIFT; value |= (config->l7 & vpifregs[config_channel_id].len_mask); regw(value, vpifregs[channel_id].v_cfg_01); value = (config->l9 & vpifregs[config_channel_id].len_mask); value <<= VPIF_CH_LEN_SHIFT; value |= (config->l11 & vpifregs[config_channel_id].len_mask); regw(value, vpifregs[channel_id].v_cfg_02); value = (config->vsize & vpifregs[config_channel_id].len_mask); regw(value, vpifregs[channel_id].v_cfg); } /* config_vpif_params * Function to set the parameters of a channel * Mainly modifies the channel ciontrol register * It sets frame format, yc mux mode */ static void config_vpif_params(struct vpif_params *vpifparams, u8 channel_id, u8 found) { const struct vpif_channel_config_params *config = &vpifparams->std_info; u32 value, ch_nip, reg; u8 start, end; int i; start = channel_id; end = channel_id + found; for (i = start; i < end; i++) { reg = vpifregs[i].ch_ctrl; if (channel_id < 2) ch_nip = VPIF_CAPTURE_CH_NIP; else ch_nip = VPIF_DISPLAY_CH_NIP; vpif_wr_bit(reg, ch_nip, config->frm_fmt); vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode); vpif_wr_bit(reg, VPIF_CH_INPUT_FIELD_FRAME_BIT, vpifparams->video_params.storage_mode); /* Set raster scanning SDR Format */ vpif_clr_bit(reg, 
VPIF_CH_SDR_FMT_BIT); vpif_wr_bit(reg, VPIF_CH_DATA_MODE_BIT, config->capture_format); if (channel_id > 1) /* Set the Pixel enable bit */ vpif_set_bit(reg, VPIF_DISPLAY_PIX_EN_BIT); else if (config->capture_format) { /* Set the polarity of various pins */ vpif_wr_bit(reg, VPIF_CH_FID_POLARITY_BIT, vpifparams->iface.fid_pol); vpif_wr_bit(reg, VPIF_CH_V_VALID_POLARITY_BIT, vpifparams->iface.vd_pol); vpif_wr_bit(reg, VPIF_CH_H_VALID_POLARITY_BIT, vpifparams->iface.hd_pol); value = regr(reg); /* Set data width */ value &= ((~(unsigned int)(0x3)) << VPIF_CH_DATA_WIDTH_BIT); value |= ((vpifparams->params.data_sz) << VPIF_CH_DATA_WIDTH_BIT); regw(value, reg); } /* Write the pitch in the driver */ regw((vpifparams->video_params.hpitch), vpifregs[i].line_offset); } } /* vpif_set_video_params * This function is used to set video parameters in VPIF register */ int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id) { const struct vpif_channel_config_params *config = &vpifparams->std_info; int found = 1; vpif_set_mode_info(config, channel_id, channel_id); if (!config->ycmux_mode) { /* YC are on separate channels (HDTV formats) */ vpif_set_mode_info(config, channel_id + 1, channel_id); found = 2; } config_vpif_params(vpifparams, channel_id, found); regw(0x80, VPIF_REQ_SIZE); regw(0x01, VPIF_EMULATION_CTRL); return found; } EXPORT_SYMBOL(vpif_set_video_params); void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams, u8 channel_id) { u32 value; value = 0x3F8 & (vbiparams->hstart0); value |= 0x3FFFFFF & ((vbiparams->vstart0) << 16); regw(value, vpifregs[channel_id].vanc0_strt); value = 0x3F8 & (vbiparams->hstart1); value |= 0x3FFFFFF & ((vbiparams->vstart1) << 16); regw(value, vpifregs[channel_id].vanc1_strt); value = 0x3F8 & (vbiparams->hsize0); value |= 0x3FFFFFF & ((vbiparams->vsize0) << 16); regw(value, vpifregs[channel_id].vanc0_size); value = 0x3F8 & (vbiparams->hsize1); value |= 0x3FFFFFF & ((vbiparams->vsize1) << 16); regw(value, 
vpifregs[channel_id].vanc1_size); } EXPORT_SYMBOL(vpif_set_vbi_display_params); int vpif_channel_getfid(u8 channel_id) { return (regr(vpifregs[channel_id].ch_ctrl) & VPIF_CH_FID_MASK) >> VPIF_CH_FID_SHIFT; } EXPORT_SYMBOL(vpif_channel_getfid); static int __init vpif_probe(struct platform_device *pdev) { int status = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENOENT; res_len = resource_size(res); res = request_mem_region(res->start, res_len, res->name); if (!res) return -EBUSY; vpif_base = ioremap(res->start, res_len); if (!vpif_base) { status = -EBUSY; goto fail; } spin_lock_init(&vpif_lock); dev_info(&pdev->dev, "vpif probe success\n"); return 0; fail: release_mem_region(res->start, res_len); return status; } static int __devexit vpif_remove(struct platform_device *pdev) { iounmap(vpif_base); release_mem_region(res->start, res_len); return 0; } static struct platform_driver vpif_driver = { .driver = { .name = "vpif", .owner = THIS_MODULE, }, .remove = __devexit_p(vpif_remove), .probe = vpif_probe, }; static void vpif_exit(void) { platform_driver_unregister(&vpif_driver); } static int __init vpif_init(void) { return platform_driver_register(&vpif_driver); } subsys_initcall(vpif_init); module_exit(vpif_exit);
gpl-2.0
OneEducation/kernel-rk310-lollipop-cx929
arch/powerpc/platforms/44x/sam440ep.c
8083
1977
/* * Sam440ep board specific routines based off bamboo.c code * original copyrights below * * Wade Farnsworth <wfarnsworth@mvista.com> * Copyright 2004 MontaVista Software Inc. * * Rewritten and ported to the merged powerpc tree: * Josh Boyer <jwboyer@linux.vnet.ibm.com> * Copyright 2007 IBM Corporation * * Modified from bamboo.c for sam440ep: * Copyright 2008 Giuseppe Coviello <gicoviello@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> #include <asm/pci-bridge.h> #include <asm/ppc4xx.h> #include <linux/i2c.h> static __initdata struct of_device_id sam440ep_of_bus[] = { { .compatible = "ibm,plb4", }, { .compatible = "ibm,opb", }, { .compatible = "ibm,ebc", }, {}, }; static int __init sam440ep_device_probe(void) { of_platform_bus_probe(NULL, sam440ep_of_bus, NULL); return 0; } machine_device_initcall(sam440ep, sam440ep_device_probe); static int __init sam440ep_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "acube,sam440ep")) return 0; pci_set_flags(PCI_REASSIGN_ALL_RSRC); return 1; } define_machine(sam440ep) { .name = "Sam440ep", .probe = sam440ep_probe, .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, .calibrate_decr = generic_calibrate_decr, }; static struct i2c_board_info sam440ep_rtc_info = { .type = "m41st85", .addr = 0x68, .irq = -1, }; static int sam440ep_setup_rtc(void) { return i2c_register_board_info(0, &sam440ep_rtc_info, 1); } machine_device_initcall(sam440ep, sam440ep_setup_rtc);
gpl-2.0
tommytarts/QuantumKernelM8-Sense
arch/powerpc/platforms/44x/sam440ep.c
8083
1977
/* * Sam440ep board specific routines based off bamboo.c code * original copyrights below * * Wade Farnsworth <wfarnsworth@mvista.com> * Copyright 2004 MontaVista Software Inc. * * Rewritten and ported to the merged powerpc tree: * Josh Boyer <jwboyer@linux.vnet.ibm.com> * Copyright 2007 IBM Corporation * * Modified from bamboo.c for sam440ep: * Copyright 2008 Giuseppe Coviello <gicoviello@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> #include <asm/pci-bridge.h> #include <asm/ppc4xx.h> #include <linux/i2c.h> static __initdata struct of_device_id sam440ep_of_bus[] = { { .compatible = "ibm,plb4", }, { .compatible = "ibm,opb", }, { .compatible = "ibm,ebc", }, {}, }; static int __init sam440ep_device_probe(void) { of_platform_bus_probe(NULL, sam440ep_of_bus, NULL); return 0; } machine_device_initcall(sam440ep, sam440ep_device_probe); static int __init sam440ep_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "acube,sam440ep")) return 0; pci_set_flags(PCI_REASSIGN_ALL_RSRC); return 1; } define_machine(sam440ep) { .name = "Sam440ep", .probe = sam440ep_probe, .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, .calibrate_decr = generic_calibrate_decr, }; static struct i2c_board_info sam440ep_rtc_info = { .type = "m41st85", .addr = 0x68, .irq = -1, }; static int sam440ep_setup_rtc(void) { return i2c_register_board_info(0, &sam440ep_rtc_info, 1); } machine_device_initcall(sam440ep, sam440ep_setup_rtc);
gpl-2.0
hmbedded/bbb-dac-old
drivers/scsi/fnic/vnic_dev.c
8339
14931
/* * Copyright 2008 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/if_ether.h> #include <linux/slab.h> #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_dev.h" #include "vnic_stats.h" struct vnic_res { void __iomem *vaddr; unsigned int count; }; struct vnic_dev { void *priv; struct pci_dev *pdev; struct vnic_res res[RES_TYPE_MAX]; enum vnic_dev_intr_mode intr_mode; struct vnic_devcmd __iomem *devcmd; struct vnic_devcmd_notify *notify; struct vnic_devcmd_notify notify_copy; dma_addr_t notify_pa; u32 *linkstatus; dma_addr_t linkstatus_pa; struct vnic_stats *stats; dma_addr_t stats_pa; struct vnic_devcmd_fw_info *fw_info; dma_addr_t fw_info_pa; }; #define VNIC_MAX_RES_HDR_SIZE \ (sizeof(struct vnic_resource_header) + \ sizeof(struct vnic_resource) * RES_TYPE_MAX) #define VNIC_RES_STRIDE 128 void *vnic_dev_priv(struct vnic_dev *vdev) { return vdev->priv; } static int vnic_dev_discover_res(struct vnic_dev *vdev, struct vnic_dev_bar *bar) { struct vnic_resource_header __iomem *rh; struct vnic_resource __iomem *r; u8 type; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { printk(KERN_ERR "vNIC BAR0 res hdr 
length error\n"); return -EINVAL; } rh = bar->vaddr; if (!rh) { printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } if (ioread32(&rh->magic) != VNIC_RES_MAGIC || ioread32(&rh->version) != VNIC_RES_VERSION) { printk(KERN_ERR "vNIC BAR0 res magic/version error " "exp (%lx/%lx) curr (%x/%x)\n", VNIC_RES_MAGIC, VNIC_RES_VERSION, ioread32(&rh->magic), ioread32(&rh->version)); return -EINVAL; } r = (struct vnic_resource __iomem *)(rh + 1); while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { u8 bar_num = ioread8(&r->bar); u32 bar_offset = ioread32(&r->bar_offset); u32 count = ioread32(&r->count); u32 len; r++; if (bar_num != 0) /* only mapping in BAR0 resources */ continue; switch (type) { case RES_TYPE_WQ: case RES_TYPE_RQ: case RES_TYPE_CQ: case RES_TYPE_INTR_CTRL: /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar->len) { printk(KERN_ERR "vNIC BAR0 resource %d " "out-of-bounds, offset 0x%x + " "size 0x%x > bar len 0x%lx\n", type, bar_offset, len, bar->len); return -EINVAL; } break; case RES_TYPE_INTR_PBA_LEGACY: case RES_TYPE_DEVCMD: len = count; break; default: continue; } vdev->res[type].count = count; vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; } return 0; } unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, enum vnic_res_type type) { return vdev->res[type].count; } void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index) { if (!vdev->res[type].vaddr) return NULL; switch (type) { case RES_TYPE_WQ: case RES_TYPE_RQ: case RES_TYPE_CQ: case RES_TYPE_INTR_CTRL: return (char __iomem *)vdev->res[type].vaddr + index * VNIC_RES_STRIDE; default: return (char __iomem *)vdev->res[type].vaddr; } } unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) { /* The base address of the desc rings must be 512 byte aligned. * Descriptor count is aligned to groups of 32 descriptors. 
A * count of 0 means the maximum 4096 descriptors. Descriptor * size is aligned to 16 bytes. */ unsigned int count_align = 32; unsigned int desc_align = 16; ring->base_align = 512; if (desc_count == 0) desc_count = 4096; ring->desc_count = ALIGN(desc_count, count_align); ring->desc_size = ALIGN(desc_size, desc_align); ring->size = ring->desc_count * ring->desc_size; ring->size_unaligned = ring->size + ring->base_align; return ring->size_unaligned; } void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) { memset(ring->descs, 0, ring->size); } int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) { vnic_dev_desc_ring_size(ring, desc_count, desc_size); ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, ring->size_unaligned, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { printk(KERN_ERR "Failed to allocate ring (size=%d), aborting\n", (int)ring->size); return -ENOMEM; } ring->base_addr = ALIGN(ring->base_addr_unaligned, ring->base_align); ring->descs = (u8 *)ring->descs_unaligned + (ring->base_addr - ring->base_addr_unaligned); vnic_dev_clear_desc_ring(ring); ring->desc_avail = ring->desc_count - 1; return 0; } void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) { if (ring->descs) { pci_free_consistent(vdev->pdev, ring->size_unaligned, ring->descs_unaligned, ring->base_addr_unaligned); ring->descs = NULL; } } int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) { struct vnic_devcmd __iomem *devcmd = vdev->devcmd; int delay; u32 status; int dev_cmd_err[] = { /* convert from fw's version of error.h to host's version */ 0, /* ERR_SUCCESS */ EINVAL, /* ERR_EINVAL */ EFAULT, /* ERR_EFAULT */ EPERM, /* ERR_EPERM */ EBUSY, /* ERR_EBUSY */ }; int err; status = ioread32(&devcmd->status); if (status & STAT_BUSY) { printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) 
{ writeq(*a0, &devcmd->args[0]); writeq(*a1, &devcmd->args[1]); wmb(); } iowrite32(cmd, &devcmd->cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) return 0; for (delay = 0; delay < wait; delay++) { udelay(100); status = ioread32(&devcmd->status); if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = dev_cmd_err[(int)readq(&devcmd->args[0])]; printk(KERN_ERR "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); *a0 = readq(&devcmd->args[0]); *a1 = readq(&devcmd->args[1]); } return 0; } } printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; } int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info) { u64 a0, a1 = 0; int wait = 1000; int err = 0; if (!vdev->fw_info) { vdev->fw_info = pci_alloc_consistent(vdev->pdev, sizeof(struct vnic_devcmd_fw_info), &vdev->fw_info_pa); if (!vdev->fw_info) return -ENOMEM; a0 = vdev->fw_info_pa; /* only get fw_info once and cache it */ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); } *fw_info = vdev->fw_info; return err; } int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, void *value) { u64 a0, a1; int wait = 1000; int err; a0 = offset; a1 = size; err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); switch (size) { case 1: *(u8 *)value = (u8)a0; break; case 2: *(u16 *)value = (u16)a0; break; case 4: *(u32 *)value = (u32)a0; break; case 8: *(u64 *)value = a0; break; default: BUG(); break; } return err; } int vnic_dev_stats_clear(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); } int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) { u64 a0, a1; int wait = 1000; if (!vdev->stats) { vdev->stats = pci_alloc_consistent(vdev->pdev, sizeof(struct vnic_stats), &vdev->stats_pa); if (!vdev->stats) return -ENOMEM; } *stats = vdev->stats; a0 = vdev->stats_pa; a1 = sizeof(struct vnic_stats); return 
vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); } int vnic_dev_close(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); } int vnic_dev_enable(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); } int vnic_dev_disable(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); } int vnic_dev_open(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); } int vnic_dev_open_done(struct vnic_dev *vdev, int *done) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; *done = 0; err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); if (err) return err; *done = (a0 == 0); return 0; } int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); } int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; *done = 0; err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); if (err) return err; *done = (a0 == 0); return 0; } int vnic_dev_hang_notify(struct vnic_dev *vdev) { u64 a0, a1; int wait = 1000; return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); } int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { u64 a0, a1; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); if (err) return err; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = ((u8 *)&a0)[i]; return 0; } void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti) { u64 a0, a1 = 0; int wait = 1000; int err; a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | (multicast ? CMD_PFILTER_MULTICAST : 0) | (broadcast ? CMD_PFILTER_BROADCAST : 0) | (promisc ? 
CMD_PFILTER_PROMISCUOUS : 0) | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) printk(KERN_ERR "Can't set packet filter\n"); } void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) ((u8 *)&a0)[i] = addr[i]; err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) printk(KERN_ERR "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { u64 a0 = 0, a1 = 0; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) ((u8 *)&a0)[i] = addr[i]; err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) printk(KERN_ERR "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], err); } int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) { u64 a0, a1; int wait = 1000; if (!vdev->notify) { vdev->notify = pci_alloc_consistent(vdev->pdev, sizeof(struct vnic_devcmd_notify), &vdev->notify_pa); if (!vdev->notify) return -ENOMEM; } a0 = vdev->notify_pa; a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; a1 += sizeof(struct vnic_devcmd_notify); return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); } void vnic_dev_notify_unset(struct vnic_dev *vdev) { u64 a0, a1; int wait = 1000; a0 = 0; /* paddr = 0 to unset notify buffer */ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ a1 += sizeof(struct vnic_devcmd_notify); vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); } static int vnic_dev_notify_ready(struct vnic_dev *vdev) { u32 *words; unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; unsigned int i; u32 csum; if (!vdev->notify) return 0; do { csum = 0; memcpy(&vdev->notify_copy, vdev->notify, sizeof(struct vnic_devcmd_notify)); words = (u32 *)&vdev->notify_copy; for (i = 1; i < nwords; i++) csum += words[i]; } while 
(csum != words[0]); return 1; } int vnic_dev_init(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); } int vnic_dev_link_status(struct vnic_dev *vdev) { if (vdev->linkstatus) return *vdev->linkstatus; if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.link_state; } u32 vnic_dev_port_speed(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.port_speed; } u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.msglvl; } u32 vnic_dev_mtu(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.mtu; } u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev) { if (!vnic_dev_notify_ready(vdev)) return 0; return vdev->notify_copy.link_down_cnt; } void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode) { vdev->intr_mode = intr_mode; } enum vnic_dev_intr_mode vnic_dev_get_intr_mode( struct vnic_dev *vdev) { return vdev->intr_mode; } void vnic_dev_unregister(struct vnic_dev *vdev) { if (vdev) { if (vdev->notify) pci_free_consistent(vdev->pdev, sizeof(struct vnic_devcmd_notify), vdev->notify, vdev->notify_pa); if (vdev->linkstatus) pci_free_consistent(vdev->pdev, sizeof(u32), vdev->linkstatus, vdev->linkstatus_pa); if (vdev->stats) pci_free_consistent(vdev->pdev, sizeof(struct vnic_stats), vdev->stats, vdev->stats_pa); if (vdev->fw_info) pci_free_consistent(vdev->pdev, sizeof(struct vnic_devcmd_fw_info), vdev->fw_info, vdev->fw_info_pa); kfree(vdev); } } struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar) { if (!vdev) { vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL); if (!vdev) return NULL; } vdev->priv = priv; vdev->pdev = pdev; if (vnic_dev_discover_res(vdev, bar)) goto err_out; vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); if 
(!vdev->devcmd) goto err_out; return vdev; err_out: vnic_dev_unregister(vdev); return NULL; }
gpl-2.0
crdroid-devices/android_kernel_htc_msm8960
drivers/staging/media/solo6x10/tw28.c
8339
24851
/* * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include "solo6x10.h" #include "tw28.h" /* XXX: Some of these values are masked into an 8-bit regs, and shifted * around for other 8-bit regs. What are the magic bits in these values? 
*/ #define DEFAULT_HDELAY_NTSC (32 - 4) #define DEFAULT_HACTIVE_NTSC (720 + 16) #define DEFAULT_VDELAY_NTSC (7 - 2) #define DEFAULT_VACTIVE_NTSC (240 + 4) #define DEFAULT_HDELAY_PAL (32 + 4) #define DEFAULT_HACTIVE_PAL (864-DEFAULT_HDELAY_PAL) #define DEFAULT_VDELAY_PAL (6) #define DEFAULT_VACTIVE_PAL (312-DEFAULT_VDELAY_PAL) static u8 tbl_tw2864_template[] = { 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x00 */ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x10 */ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x20 */ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x30 */ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xa0, 0x88, 0x5a, 0x01, 0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */ 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44, 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00, 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xb5, 0x09, 0x78, 0x85, 
0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00, }; static u8 tbl_tw2865_ntsc_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, /* 0x30 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, /* 0x40 */ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43, 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */ 0xE9, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80, 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13, 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, /* 0xa0 */ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44, 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */ 0xFF, 0xE7, 0xE9, 0xE9, 0xEB, 0xFF, 0xD6, 0xD8, 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80, 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0, }; static u8 tbl_tw2865_pal_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 
0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, /* 0x40 */ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43, 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */ 0xEA, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80, 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13, 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, /* 0xa0 */ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44, 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */ 0xFF, 0xE7, 0xE9, 0xE9, 0xE9, 0xFF, 0xD7, 0xD8, 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80, 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0, }; #define is_tw286x(__solo, __id) (!(__solo->tw2815 & (1 << __id))) static u8 tw_readbyte(struct solo_dev *solo_dev, int chip_id, u8 tw6x_off, u8 tw_off) { if (is_tw286x(solo_dev, chip_id)) return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw6x_off); else return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw_off); } static void tw_writebyte(struct 
solo_dev *solo_dev, int chip_id, u8 tw6x_off, u8 tw_off, u8 val) { if (is_tw286x(solo_dev, chip_id)) solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw6x_off, val); else solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw_off, val); } static void tw_write_and_verify(struct solo_dev *solo_dev, u8 addr, u8 off, u8 val) { int i; for (i = 0; i < 5; i++) { u8 rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, addr, off); if (rval == val) return; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, addr, off, val); msleep_interruptible(1); } /* printk("solo6x10/tw28: Error writing register: %02x->%02x [%02x]\n", addr, off, val); */ } static int tw2865_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_tw2865_common[256]; int i; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) memcpy(tbl_tw2865_common, tbl_tw2865_pal_template, sizeof(tbl_tw2865_common)); else memcpy(tbl_tw2865_common, tbl_tw2865_ntsc_template, sizeof(tbl_tw2865_common)); /* ALINK Mode */ if (solo_dev->nr_chans == 4) { tbl_tw2865_common[0xd2] = 0x01; tbl_tw2865_common[0xcf] = 0x00; } else if (solo_dev->nr_chans == 8) { tbl_tw2865_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2865_common[0xcf] = 0x80; } else if (solo_dev->nr_chans == 16) { tbl_tw2865_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2865_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2865_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2865_common[0xcf] = 0x80; } for (i = 0; i < 0xff; i++) { /* Skip read only registers */ if (i >= 0xb8 && i <= 0xc1) continue; if ((i & ~0x30) == 0x00 || (i & ~0x30) == 0x0c || (i & ~0x30) == 0x0d) continue; if (i >= 0xc4 && i <= 0xc7) continue; if (i == 0xfd) continue; tw_write_and_verify(solo_dev, dev_addr, i, tbl_tw2865_common[i]); } return 0; } static int tw2864_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_tw2864_common[sizeof(tbl_tw2864_template)]; int i; 
memcpy(tbl_tw2864_common, tbl_tw2864_template, sizeof(tbl_tw2864_common)); if (solo_dev->tw2865 == 0) { /* IRQ Mode */ if (solo_dev->nr_chans == 4) { tbl_tw2864_common[0xd2] = 0x01; tbl_tw2864_common[0xcf] = 0x00; } else if (solo_dev->nr_chans == 8) { tbl_tw2864_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x40; } else if (solo_dev->nr_chans == 16) { tbl_tw2864_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2864_common[0xcf] = 0x40; } } else { /* ALINK Mode. Assumes that the first tw28xx is a * 2865 and these are in cascade. */ for (i = 0; i <= 4; i++) tbl_tw2864_common[0x08 | i << 4] = 0x12; if (solo_dev->nr_chans == 8) { tbl_tw2864_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x80; } else if (solo_dev->nr_chans == 16) { tbl_tw2864_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2864_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2864_common[0xcf] = 0x80; } } /* NTSC or PAL */ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) { for (i = 0; i < 4; i++) { tbl_tw2864_common[0x07 | (i << 4)] |= 0x10; tbl_tw2864_common[0x08 | (i << 4)] |= 0x06; tbl_tw2864_common[0x0a | (i << 4)] |= 0x08; tbl_tw2864_common[0x0b | (i << 4)] |= 0x13; tbl_tw2864_common[0x0e | (i << 4)] |= 0x01; } tbl_tw2864_common[0x9d] = 0x90; tbl_tw2864_common[0xf3] = 0x00; tbl_tw2864_common[0xf4] = 0xa0; } for (i = 0; i < 0xff; i++) { /* Skip read only registers */ if (i >= 0xb8 && i <= 0xc1) continue; if ((i & ~0x30) == 0x00 || (i & ~0x30) == 0x0c || (i & ~0x30) == 0x0d) 
continue; if (i == 0x74 || i == 0x77 || i == 0x78 || i == 0x79 || i == 0x7a) continue; if (i == 0xfd) continue; tw_write_and_verify(solo_dev, dev_addr, i, tbl_tw2864_common[i]); } return 0; } static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_ntsc_tw2815_common[] = { 0x00, 0xc8, 0x20, 0xd0, 0x06, 0xf0, 0x08, 0x80, 0x80, 0x80, 0x80, 0x02, 0x06, 0x00, 0x11, }; u8 tbl_pal_tw2815_common[] = { 0x00, 0x88, 0x20, 0xd0, 0x05, 0x20, 0x28, 0x80, 0x80, 0x80, 0x80, 0x82, 0x06, 0x00, 0x11, }; u8 tbl_tw2815_sfr[] = { 0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, /* 0x00 */ 0x64, 0x80, 0x80, 0x82, 0x82, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, /* 0x10 */ 0x00, 0x00, 0x00, 0xff, 0x8f, 0x00, 0x00, 0x00, 0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, /* 0x20 */ 0x31, 0x75, 0xb9, 0xfd, 0x00, 0x00, 0x88, 0x88, 0x88, 0x11, 0x00, 0x88, 0x88, 0x00, /* 0x30 */ }; u8 *tbl_tw2815_common; int i; int ch; tbl_ntsc_tw2815_common[0x06] = 0; /* Horizontal Delay Control */ tbl_ntsc_tw2815_common[0x02] = DEFAULT_HDELAY_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_NTSC >> 8); /* Horizontal Active Control */ tbl_ntsc_tw2815_common[0x03] = DEFAULT_HACTIVE_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x03 & (DEFAULT_HACTIVE_NTSC >> 8)) << 2); /* Vertical Delay Control */ tbl_ntsc_tw2815_common[0x04] = DEFAULT_VDELAY_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VDELAY_NTSC >> 8)) << 4); /* Vertical Active Control */ tbl_ntsc_tw2815_common[0x05] = DEFAULT_VACTIVE_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VACTIVE_NTSC >> 8)) << 5); tbl_pal_tw2815_common[0x06] = 0; /* Horizontal Delay Control */ tbl_pal_tw2815_common[0x02] = DEFAULT_HDELAY_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_PAL >> 8); /* Horizontal Active Control */ tbl_pal_tw2815_common[0x03] = DEFAULT_HACTIVE_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x03 & (DEFAULT_HACTIVE_PAL >> 8)) << 2); /* Vertical Delay 
Control */ tbl_pal_tw2815_common[0x04] = DEFAULT_VDELAY_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VDELAY_PAL >> 8)) << 4); /* Vertical Active Control */ tbl_pal_tw2815_common[0x05] = DEFAULT_VACTIVE_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VACTIVE_PAL >> 8)) << 5); tbl_tw2815_common = (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) ? tbl_ntsc_tw2815_common : tbl_pal_tw2815_common; /* Dual ITU-R BT.656 format */ tbl_tw2815_common[0x0d] |= 0x04; /* Audio configuration */ tbl_tw2815_sfr[0x62 - 0x40] &= ~(3 << 6); if (solo_dev->nr_chans == 4) { tbl_tw2815_sfr[0x63 - 0x40] |= 1; tbl_tw2815_sfr[0x62 - 0x40] |= 3 << 6; } else if (solo_dev->nr_chans == 8) { tbl_tw2815_sfr[0x63 - 0x40] |= 2; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6; } else if (solo_dev->nr_chans == 16) { tbl_tw2815_sfr[0x63 - 0x40] |= 3; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6; } /* Output mode of R_ADATM pin (0 mixing, 1 record) */ /* tbl_tw2815_sfr[0x63 - 0x40] |= 0 << 2; */ /* 8KHz, used to be 16KHz, but changed for remote client compat */ tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 2; tbl_tw2815_sfr[0x6c - 0x40] |= 0 << 2; /* Playback of right channel */ tbl_tw2815_sfr[0x6c - 0x40] |= 1 << 5; /* Reserved value (XXX ??) 
*/ tbl_tw2815_sfr[0x5c - 0x40] |= 1 << 5; /* Analog output gain and mix ratio playback on full */ tbl_tw2815_sfr[0x70 - 0x40] |= 0xff; /* Select playback audio and mute all except */ tbl_tw2815_sfr[0x71 - 0x40] |= 0x10; tbl_tw2815_sfr[0x6d - 0x40] |= 0x0f; /* End of audio configuration */ for (ch = 0; ch < 4; ch++) { tbl_tw2815_common[0x0d] &= ~3; switch (ch) { case 0: tbl_tw2815_common[0x0d] |= 0x21; break; case 1: tbl_tw2815_common[0x0d] |= 0x20; break; case 2: tbl_tw2815_common[0x0d] |= 0x23; break; case 3: tbl_tw2815_common[0x0d] |= 0x22; break; } for (i = 0; i < 0x0f; i++) { if (i == 0x00) continue; /* read-only */ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, (ch * 0x10) + i, tbl_tw2815_common[i]); } } for (i = 0x40; i < 0x76; i++) { /* Skip read-only and nop registers */ if (i == 0x40 || i == 0x59 || i == 0x5a || i == 0x5d || i == 0x5e || i == 0x5f) continue; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, i, tbl_tw2815_sfr[i - 0x40]); } return 0; } #define FIRST_ACTIVE_LINE 0x0008 #define LAST_ACTIVE_LINE 0x0102 static void saa7128_setup(struct solo_dev *solo_dev) { int i; unsigned char regs[128] = { 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1C, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x59, 0x1d, 0x75, 0x3f, 0x06, 0x3f, 0x00, 0x00, 0x1c, 0x33, 0x00, 0x3f, 0x00, 0x00, 0x3f, 0x00, 0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18, 0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f, 0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06, 0x02, 0x80, 0x71, 0x77, 0xa7, 0x67, 0x66, 0x2e, 0x7b, 0x11, 0x4f, 0x1f, 0x7c, 0xf0, 0x21, 0x77, 0x41, 0x88, 0x41, 0x12, 0xed, 0x10, 0x10, 0x00, 0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x08, 0xff, 0x80, 0x00, 0xff, 0xff, }; regs[0x7A] = FIRST_ACTIVE_LINE & 0xff; regs[0x7B] = LAST_ACTIVE_LINE & 0xff; regs[0x7C] = 
((1 << 7) | (((LAST_ACTIVE_LINE >> 8) & 1) << 6) | (((FIRST_ACTIVE_LINE >> 8) & 1) << 4)); /* PAL: XXX: We could do a second set of regs to avoid this */ if (solo_dev->video_type != SOLO_VO_FMT_TYPE_NTSC) { regs[0x28] = 0xE1; regs[0x5A] = 0x0F; regs[0x61] = 0x02; regs[0x62] = 0x35; regs[0x63] = 0xCB; regs[0x64] = 0x8A; regs[0x65] = 0x09; regs[0x66] = 0x2A; regs[0x6C] = 0xf1; regs[0x6E] = 0x20; regs[0x7A] = 0x06 + 12; regs[0x7b] = 0x24 + 12; regs[0x7c] |= 1 << 6; } /* First 0x25 bytes are read-only? */ for (i = 0x26; i < 128; i++) { if (i == 0x60 || i == 0x7D) continue; solo_i2c_writebyte(solo_dev, SOLO_I2C_SAA, 0x46, i, regs[i]); } return; } int solo_tw28_init(struct solo_dev *solo_dev) { int i; u8 value; /* Detect techwell chip type */ for (i = 0; i < TW_NUM_CHIP; i++) { value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(i), 0xFF); switch (value >> 3) { case 0x18: solo_dev->tw2865 |= 1 << i; solo_dev->tw28_cnt++; break; case 0x0c: solo_dev->tw2864 |= 1 << i; solo_dev->tw28_cnt++; break; default: value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(i), 0x59); if ((value >> 3) == 0x04) { solo_dev->tw2815 |= 1 << i; solo_dev->tw28_cnt++; } } } if (!solo_dev->tw28_cnt) return -EINVAL; saa7128_setup(solo_dev); for (i = 0; i < solo_dev->tw28_cnt; i++) { if ((solo_dev->tw2865 & (1 << i))) tw2865_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); else if ((solo_dev->tw2864 & (1 << i))) tw2864_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); else tw2815_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); } dev_info(&solo_dev->pdev->dev, "Initialized %d tw28xx chip%s:", solo_dev->tw28_cnt, solo_dev->tw28_cnt == 1 ? 
"" : "s"); if (solo_dev->tw2865) printk(" tw2865[%d]", hweight32(solo_dev->tw2865)); if (solo_dev->tw2864) printk(" tw2864[%d]", hweight32(solo_dev->tw2864)); if (solo_dev->tw2815) printk(" tw2815[%d]", hweight32(solo_dev->tw2815)); printk("\n"); return 0; } /* * We accessed the video status signal in the Techwell chip through * iic/i2c because the video status reported by register REG_VI_STATUS1 * (address 0x012C) of the SOLO6010 chip doesn't give the correct video * status signal values. */ int tw28_get_video_status(struct solo_dev *solo_dev, u8 ch) { u8 val, chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; val = tw_readbyte(solo_dev, chip_num, TW286X_AV_STAT_ADDR, TW_AV_STAT_ADDR) & 0x0f; return val & (1 << ch) ? 1 : 0; } #if 0 /* Status of audio from up to 4 techwell chips are combined into 1 variable. * See techwell datasheet for details. */ u16 tw28_get_audio_status(struct solo_dev *solo_dev) { u8 val; u16 status = 0; int i; for (i = 0; i < solo_dev->tw28_cnt; i++) { val = (tw_readbyte(solo_dev, i, TW286X_AV_STAT_ADDR, TW_AV_STAT_ADDR) & 0xf0) >> 4; status |= val << (i * 4); } return status; } #endif int tw28_set_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 val) { char sval; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; if (val > 255 || val < 0) return -ERANGE; switch (ctrl) { case V4L2_CID_SHARPNESS: /* Only 286x has sharpness */ if (val > 0x0f || val < 0) return -ERANGE; if (is_tw286x(solo_dev, chip_num)) { u8 v = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num)); v &= 0xf0; v |= val; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num), v); } else if (val != 0) return -ERANGE; break; case V4L2_CID_HUE: if (is_tw286x(solo_dev, chip_num)) sval = val - 128; else sval = (char)val; tw_writebyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), TW_HUE_ADDR(ch), sval); break; case 
V4L2_CID_SATURATION: if (is_tw286x(solo_dev, chip_num)) { solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SATURATIONU_ADDR(ch), val); } tw_writebyte(solo_dev, chip_num, TW286x_SATURATIONV_ADDR(ch), TW_SATURATION_ADDR(ch), val); break; case V4L2_CID_CONTRAST: tw_writebyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), TW_CONTRAST_ADDR(ch), val); break; case V4L2_CID_BRIGHTNESS: if (is_tw286x(solo_dev, chip_num)) sval = val - 128; else sval = (char)val; tw_writebyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), TW_BRIGHTNESS_ADDR(ch), sval); break; default: return -EINVAL; } return 0; } int tw28_get_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 *val) { u8 rval, chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; switch (ctrl) { case V4L2_CID_SHARPNESS: /* Only 286x has sharpness */ if (is_tw286x(solo_dev, chip_num)) { rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num)); *val = rval & 0x0f; } else *val = 0; break; case V4L2_CID_HUE: rval = tw_readbyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), TW_HUE_ADDR(ch)); if (is_tw286x(solo_dev, chip_num)) *val = (s32)((char)rval) + 128; else *val = rval; break; case V4L2_CID_SATURATION: *val = tw_readbyte(solo_dev, chip_num, TW286x_SATURATIONU_ADDR(ch), TW_SATURATION_ADDR(ch)); break; case V4L2_CID_CONTRAST: *val = tw_readbyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), TW_CONTRAST_ADDR(ch)); break; case V4L2_CID_BRIGHTNESS: rval = tw_readbyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), TW_BRIGHTNESS_ADDR(ch)); if (is_tw286x(solo_dev, chip_num)) *val = (s32)((char)rval) + 128; else *val = rval; break; default: return -EINVAL; } return 0; } #if 0 /* * For audio output volume, the output channel is only 1. In this case we * don't need to offset TW_CHIP_OFFSET_ADDR. The TW_CHIP_OFFSET_ADDR used * is the base address of the techwell chip. 
*/ void tw2815_Set_AudioOutVol(struct solo_dev *solo_dev, unsigned int u_val) { unsigned int val; unsigned int chip_num; chip_num = (solo_dev->nr_chans - 1) / 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR, TW_AUDIO_OUTPUT_VOL_ADDR); u_val = (val & 0x0f) | (u_val << 4); tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR, TW_AUDIO_OUTPUT_VOL_ADDR, u_val); } #endif u8 tw28_get_audio_gain(struct solo_dev *solo_dev, u8 ch) { u8 val; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch)); return (ch % 2) ? (val >> 4) : (val & 0x0f); } void tw28_set_audio_gain(struct solo_dev *solo_dev, u8 ch, u8 val) { u8 old_val; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; old_val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch)); val = (old_val & ((ch % 2) ? 0x0f : 0xf0)) | ((ch % 2) ? (val << 4) : val); tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch), val); }
gpl-2.0
FreeProjectAce/protou_kernel
drivers/rapidio/rio-sysfs.c
8339
6871
/* * RapidIO sysfs attributes and support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/stat.h> #include <linux/capability.h> #include "rio.h" /* Sysfs support */ #define rio_config_attr(field, format_string) \ static ssize_t \ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct rio_dev *rdev = to_rio_dev(dev); \ \ return sprintf(buf, format_string, rdev->field); \ } \ rio_config_attr(did, "0x%04x\n"); rio_config_attr(vid, "0x%04x\n"); rio_config_attr(device_rev, "0x%08x\n"); rio_config_attr(asm_did, "0x%04x\n"); rio_config_attr(asm_vid, "0x%04x\n"); rio_config_attr(asm_rev, "0x%04x\n"); rio_config_attr(destid, "0x%04x\n"); rio_config_attr(hopcount, "0x%02x\n"); static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); char *str = buf; int i; for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); i++) { if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE) continue; str += sprintf(str, "%04x %02x\n", i, rdev->rswitch->route_table[i]); } return (str - buf); } static ssize_t lprev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); return sprintf(buf, "%s\n", (rdev->prev) ? 
rio_name(rdev->prev) : "root"); } static ssize_t lnext_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); char *str = buf; int i; if (rdev->pef & RIO_PEF_SWITCH) { for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) { if (rdev->rswitch->nextdev[i]) str += sprintf(str, "%s\n", rio_name(rdev->rswitch->nextdev[i])); else str += sprintf(str, "null\n"); } } return str - buf; } struct device_attribute rio_dev_attrs[] = { __ATTR_RO(did), __ATTR_RO(vid), __ATTR_RO(device_rev), __ATTR_RO(asm_did), __ATTR_RO(asm_vid), __ATTR_RO(asm_rev), __ATTR_RO(lprev), __ATTR_RO(destid), __ATTR_NULL, }; static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL); static DEVICE_ATTR(lnext, S_IRUGO, lnext_show, NULL); static DEVICE_ATTR(hopcount, S_IRUGO, hopcount_show, NULL); static ssize_t rio_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct rio_dev *dev = to_rio_dev(container_of(kobj, struct device, kobj)); unsigned int size = 0x100; loff_t init_off = off; u8 *data = (u8 *) buf; /* Several chips lock up trying to read undefined config space */ if (capable(CAP_SYS_ADMIN)) size = RIO_MAINT_SPACE_SZ; if (off >= size) return 0; if (off + count > size) { size -= off; count = size; } else { size = count; } if ((off & 1) && size) { u8 val; rio_read_config_8(dev, off, &val); data[off - init_off] = val; off++; size--; } if ((off & 3) && size > 2) { u16 val; rio_read_config_16(dev, off, &val); data[off - init_off] = (val >> 8) & 0xff; data[off - init_off + 1] = val & 0xff; off += 2; size -= 2; } while (size > 3) { u32 val; rio_read_config_32(dev, off, &val); data[off - init_off] = (val >> 24) & 0xff; data[off - init_off + 1] = (val >> 16) & 0xff; data[off - init_off + 2] = (val >> 8) & 0xff; data[off - init_off + 3] = val & 0xff; off += 4; size -= 4; } if (size >= 2) { u16 val; rio_read_config_16(dev, off, &val); data[off - init_off] = (val >> 8) & 0xff; 
data[off - init_off + 1] = val & 0xff; off += 2; size -= 2; } if (size > 0) { u8 val; rio_read_config_8(dev, off, &val); data[off - init_off] = val; off++; --size; } return count; } static ssize_t rio_write_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct rio_dev *dev = to_rio_dev(container_of(kobj, struct device, kobj)); unsigned int size = count; loff_t init_off = off; u8 *data = (u8 *) buf; if (off >= RIO_MAINT_SPACE_SZ) return 0; if (off + count > RIO_MAINT_SPACE_SZ) { size = RIO_MAINT_SPACE_SZ - off; count = size; } if ((off & 1) && size) { rio_write_config_8(dev, off, data[off - init_off]); off++; size--; } if ((off & 3) && (size > 2)) { u16 val = data[off - init_off + 1]; val |= (u16) data[off - init_off] << 8; rio_write_config_16(dev, off, val); off += 2; size -= 2; } while (size > 3) { u32 val = data[off - init_off + 3]; val |= (u32) data[off - init_off + 2] << 8; val |= (u32) data[off - init_off + 1] << 16; val |= (u32) data[off - init_off] << 24; rio_write_config_32(dev, off, val); off += 4; size -= 4; } if (size >= 2) { u16 val = data[off - init_off + 1]; val |= (u16) data[off - init_off] << 8; rio_write_config_16(dev, off, val); off += 2; size -= 2; } if (size) { rio_write_config_8(dev, off, data[off - init_off]); off++; --size; } return count; } static struct bin_attribute rio_config_attr = { .attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, .size = RIO_MAINT_SPACE_SZ, .read = rio_read_config, .write = rio_write_config, }; /** * rio_create_sysfs_dev_files - create RIO specific sysfs files * @rdev: device whose entries should be created * * Create files when @rdev is added to sysfs. 
*/ int rio_create_sysfs_dev_files(struct rio_dev *rdev) { int err = 0; err = device_create_bin_file(&rdev->dev, &rio_config_attr); if (!err && (rdev->pef & RIO_PEF_SWITCH)) { err |= device_create_file(&rdev->dev, &dev_attr_routes); err |= device_create_file(&rdev->dev, &dev_attr_lnext); err |= device_create_file(&rdev->dev, &dev_attr_hopcount); if (!err && rdev->rswitch->sw_sysfs) err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE); } if (err) pr_warning("RIO: Failed to create attribute file(s) for %s\n", rio_name(rdev)); return err; } /** * rio_remove_sysfs_dev_files - cleanup RIO specific sysfs files * @rdev: device whose entries we should free * * Cleanup when @rdev is removed from sysfs. */ void rio_remove_sysfs_dev_files(struct rio_dev *rdev) { device_remove_bin_file(&rdev->dev, &rio_config_attr); if (rdev->pef & RIO_PEF_SWITCH) { device_remove_file(&rdev->dev, &dev_attr_routes); device_remove_file(&rdev->dev, &dev_attr_lnext); device_remove_file(&rdev->dev, &dev_attr_hopcount); if (rdev->rswitch->sw_sysfs) rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); } }
gpl-2.0
czobor/phablet_kernel_samsung_msm7x30-common
drivers/staging/media/solo6x10/gpio.c
8339
2804
/* * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/fs.h> #include <asm/uaccess.h> #include "solo6x10.h" static void solo_gpio_mode(struct solo_dev *solo_dev, unsigned int port_mask, unsigned int mode) { int port; unsigned int ret; ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_0); /* To set gpio */ for (port = 0; port < 16; port++) { if (!((1 << port) & port_mask)) continue; ret &= (~(3 << (port << 1))); ret |= ((mode & 3) << (port << 1)); } solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_0, ret); /* To set extended gpio - sensor */ ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_1); for (port = 0; port < 16; port++) { if (!((1 << (port + 16)) & port_mask)) continue; if (!mode) ret &= ~(1 << port); else ret |= 1 << port; } solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_1, ret); } static void solo_gpio_set(struct solo_dev *solo_dev, unsigned int value) { solo_reg_write(solo_dev, SOLO_GPIO_DATA_OUT, solo_reg_read(solo_dev, SOLO_GPIO_DATA_OUT) | value); } static void solo_gpio_clear(struct solo_dev *solo_dev, unsigned int value) { solo_reg_write(solo_dev, SOLO_GPIO_DATA_OUT, solo_reg_read(solo_dev, SOLO_GPIO_DATA_OUT) & ~value); } static void solo_gpio_config(struct solo_dev 
*solo_dev) { /* Video reset */ solo_gpio_mode(solo_dev, 0x30, 1); solo_gpio_clear(solo_dev, 0x30); udelay(100); solo_gpio_set(solo_dev, 0x30); udelay(100); /* Warning: Don't touch the next line unless you're sure of what * you're doing: first four gpio [0-3] are used for video. */ solo_gpio_mode(solo_dev, 0x0f, 2); /* We use bit 8-15 of SOLO_GPIO_CONFIG_0 for relay purposes */ solo_gpio_mode(solo_dev, 0xff00, 1); /* Initially set relay status to 0 */ solo_gpio_clear(solo_dev, 0xff00); } int solo_gpio_init(struct solo_dev *solo_dev) { solo_gpio_config(solo_dev); return 0; } void solo_gpio_exit(struct solo_dev *solo_dev) { solo_gpio_clear(solo_dev, 0x30); solo_gpio_config(solo_dev); }
gpl-2.0
wgoossens/linux-nios2
security/tomoyo/environ.c
9875
3117
/* * security/tomoyo/environ.c * * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" /** * tomoyo_check_env_acl - Check permission for environment variable's name. * * @r: Pointer to "struct tomoyo_request_info". * @ptr: Pointer to "struct tomoyo_acl_info". * * Returns true if granted, false otherwise. */ static bool tomoyo_check_env_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_env_acl *acl = container_of(ptr, typeof(*acl), head); return tomoyo_path_matches_pattern(r->param.environ.name, acl->env); } /** * tomoyo_audit_env_log - Audit environment variable name log. * * @r: Pointer to "struct tomoyo_request_info". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_audit_env_log(struct tomoyo_request_info *r) { return tomoyo_supervisor(r, "misc env %s\n", r->param.environ.name->name); } /** * tomoyo_env_perm - Check permission for environment variable's name. * * @r: Pointer to "struct tomoyo_request_info". * @env: The name of environment variable. * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env) { struct tomoyo_path_info environ; int error; if (!env || !*env) return 0; environ.name = env; tomoyo_fill_path_info(&environ); r->param_type = TOMOYO_TYPE_ENV_ACL; r->param.environ.name = &environ; do { tomoyo_check_acl(r, tomoyo_check_env_acl); error = tomoyo_audit_env_log(r); } while (error == TOMOYO_RETRY_REQUEST); return error; } /** * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry. * * @a: Pointer to "struct tomoyo_acl_info". * @b: Pointer to "struct tomoyo_acl_info". * * Returns true if @a == @b, false otherwise. 
*/ static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); return p1->env == p2->env; } /** * tomoyo_write_env - Write "struct tomoyo_env_acl" list. * * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ static int tomoyo_write_env(struct tomoyo_acl_param *param) { struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; int error = -ENOMEM; const char *data = tomoyo_read_token(param); if (!tomoyo_correct_word(data) || strchr(data, '=')) return -EINVAL; e.env = tomoyo_get_name(data); if (!e.env) return error; error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_same_env_acl, NULL); tomoyo_put_name(e.env); return error; } /** * tomoyo_write_misc - Update environment variable list. * * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. */ int tomoyo_write_misc(struct tomoyo_acl_param *param) { if (tomoyo_str_starts(&param->data, "env ")) return tomoyo_write_env(param); return -EINVAL; }
gpl-2.0
AospPlus/android_external_busybox
e2fsprogs/old_e2fsprogs/blkid/tag.c
148
9830
/* vi: set sw=4 ts=4: */ /* * tag.c - allocation/initialization/free routines for tag structs * * Copyright (C) 2001 Andreas Dilger * Copyright (C) 2003 Theodore Ts'o * * %Begin-Header% * This file may be redistributed under the terms of the * GNU Lesser General Public License. * %End-Header% */ #include <stdlib.h> #include <string.h> #include <stdio.h> #include "blkidP.h" static blkid_tag blkid_new_tag(void) { blkid_tag tag; tag = xzalloc(sizeof(struct blkid_struct_tag)); INIT_LIST_HEAD(&tag->bit_tags); INIT_LIST_HEAD(&tag->bit_names); return tag; } #ifdef CONFIG_BLKID_DEBUG void blkid_debug_dump_tag(blkid_tag tag) { if (!tag) { printf(" tag: NULL\n"); return; } printf(" tag: %s=\"%s\"\n", tag->bit_name, tag->bit_val); } #endif void blkid_free_tag(blkid_tag tag) { if (!tag) return; DBG(DEBUG_TAG, printf(" freeing tag %s=%s\n", tag->bit_name, tag->bit_val ? tag->bit_val : "(NULL)")); DBG(DEBUG_TAG, blkid_debug_dump_tag(tag)); list_del(&tag->bit_tags); /* list of tags for this device */ list_del(&tag->bit_names); /* list of tags with this type */ free(tag->bit_name); free(tag->bit_val); free(tag); } /* * Find the desired tag on a device. If value is NULL, then the * first such tag is returned, otherwise return only exact tag if found. */ blkid_tag blkid_find_tag_dev(blkid_dev dev, const char *type) { struct list_head *p; if (!dev || !type) return NULL; list_for_each(p, &dev->bid_tags) { blkid_tag tmp = list_entry(p, struct blkid_struct_tag, bit_tags); if (!strcmp(tmp->bit_name, type)) return tmp; } return NULL; } /* * Find the desired tag type in the cache. * We return the head tag for this tag type. 
*/ static blkid_tag blkid_find_head_cache(blkid_cache cache, const char *type) { blkid_tag head = NULL, tmp; struct list_head *p; if (!cache || !type) return NULL; list_for_each(p, &cache->bic_tags) { tmp = list_entry(p, struct blkid_struct_tag, bit_tags); if (!strcmp(tmp->bit_name, type)) { DBG(DEBUG_TAG, printf(" found cache tag head %s\n", type)); head = tmp; break; } } return head; } /* * Set a tag on an existing device. * * If value is NULL, then delete the tagsfrom the device. */ int blkid_set_tag(blkid_dev dev, const char *name, const char *value, const int vlength) { blkid_tag t = 0, head = 0; char *val = NULL; if (!dev || !name) return -BLKID_ERR_PARAM; if (!(val = blkid_strndup(value, vlength)) && value) return -BLKID_ERR_MEM; t = blkid_find_tag_dev(dev, name); if (!value) { blkid_free_tag(t); } else if (t) { if (!strcmp(t->bit_val, val)) { /* Same thing, exit */ free(val); return 0; } free(t->bit_val); t->bit_val = val; } else { /* Existing tag not present, add to device */ if (!(t = blkid_new_tag())) goto errout; t->bit_name = blkid_strdup(name); t->bit_val = val; t->bit_dev = dev; list_add_tail(&t->bit_tags, &dev->bid_tags); if (dev->bid_cache) { head = blkid_find_head_cache(dev->bid_cache, t->bit_name); if (!head) { head = blkid_new_tag(); if (!head) goto errout; DBG(DEBUG_TAG, printf(" creating new cache tag head %s\n", name)); head->bit_name = blkid_strdup(name); if (!head->bit_name) goto errout; list_add_tail(&head->bit_tags, &dev->bid_cache->bic_tags); } list_add_tail(&t->bit_names, &head->bit_names); } } /* Link common tags directly to the device struct */ if (!strcmp(name, "TYPE")) dev->bid_type = val; else if (!strcmp(name, "LABEL")) dev->bid_label = val; else if (!strcmp(name, "UUID")) dev->bid_uuid = val; if (dev->bid_cache) dev->bid_cache->bic_flags |= BLKID_BIC_FL_CHANGED; return 0; errout: blkid_free_tag(t); if (!t) free(val); blkid_free_tag(head); return -BLKID_ERR_MEM; } /* * Parse a "NAME=value" string. 
This is slightly different than * parse_token, because that will end an unquoted value at a space, while * this will assume that an unquoted value is the rest of the token (e.g. * if we are passed an already quoted string from the command-line we don't * have to both quote and escape quote so that the quotes make it to * us). * * Returns 0 on success, and -1 on failure. */ int blkid_parse_tag_string(const char *token, char **ret_type, char **ret_val) { char *name, *value, *cp; DBG(DEBUG_TAG, printf("trying to parse '%s' as a tag\n", token)); if (!token || !(cp = strchr(token, '='))) return -1; name = blkid_strdup(token); if (!name) return -1; value = name + (cp - token); *value++ = '\0'; if (*value == '"' || *value == '\'') { char c = *value++; if (!(cp = strrchr(value, c))) goto errout; /* missing closing quote */ *cp = '\0'; } value = blkid_strdup(value); if (!value) goto errout; *ret_type = name; *ret_val = value; return 0; errout: free(name); return -1; } /* * Tag iteration routines for the public libblkid interface. * * These routines do not expose the list.h implementation, which are a * contamination of the namespace, and which force us to reveal far, far * too much of our internal implemenation. I'm not convinced I want * to keep list.h in the long term, anyway. It's fine for kernel * programming, but performance is not the #1 priority for this * library, and I really don't like the tradeoff of type-safety for * performance for this application. 
[tytso:20030125.2007EST] */ /* * This series of functions iterate over all tags in a device */ #define TAG_ITERATE_MAGIC 0x01a5284c struct blkid_struct_tag_iterate { int magic; blkid_dev dev; struct list_head *p; }; blkid_tag_iterate blkid_tag_iterate_begin(blkid_dev dev) { blkid_tag_iterate iter; iter = xmalloc(sizeof(struct blkid_struct_tag_iterate)); iter->magic = TAG_ITERATE_MAGIC; iter->dev = dev; iter->p = dev->bid_tags.next; return iter; } /* * Return 0 on success, -1 on error */ extern int blkid_tag_next(blkid_tag_iterate iter, const char **type, const char **value) { blkid_tag tag; *type = 0; *value = 0; if (!iter || iter->magic != TAG_ITERATE_MAGIC || iter->p == &iter->dev->bid_tags) return -1; tag = list_entry(iter->p, struct blkid_struct_tag, bit_tags); *type = tag->bit_name; *value = tag->bit_val; iter->p = iter->p->next; return 0; } void blkid_tag_iterate_end(blkid_tag_iterate iter) { if (!iter || iter->magic != TAG_ITERATE_MAGIC) return; iter->magic = 0; free(iter); } /* * This function returns a device which matches a particular * type/value pair. If there is more than one device that matches the * search specification, it returns the one with the highest priority * value. This allows us to give preference to EVMS or LVM devices. * * XXX there should also be an interface which uses an iterator so we * can get all of the devices which match a type/value search parameter. 
*/ extern blkid_dev blkid_find_dev_with_tag(blkid_cache cache, const char *type, const char *value) { blkid_tag head; blkid_dev dev; int pri; struct list_head *p; if (!cache || !type || !value) return NULL; blkid_read_cache(cache); DBG(DEBUG_TAG, printf("looking for %s=%s in cache\n", type, value)); try_again: pri = -1; dev = 0; head = blkid_find_head_cache(cache, type); if (head) { list_for_each(p, &head->bit_names) { blkid_tag tmp = list_entry(p, struct blkid_struct_tag, bit_names); if (!strcmp(tmp->bit_val, value) && tmp->bit_dev->bid_pri > pri) { dev = tmp->bit_dev; pri = dev->bid_pri; } } } if (dev && !(dev->bid_flags & BLKID_BID_FL_VERIFIED)) { dev = blkid_verify(cache, dev); if (dev && (dev->bid_flags & BLKID_BID_FL_VERIFIED)) goto try_again; } if (!dev && !(cache->bic_flags & BLKID_BIC_FL_PROBED)) { if (blkid_probe_all(cache) < 0) return NULL; goto try_again; } return dev; } #ifdef TEST_PROGRAM #ifdef HAVE_GETOPT_H #include <getopt.h> #else extern char *optarg; extern int optind; #endif void usage(char *prog) { fprintf(stderr, "Usage: %s [-f blkid_file] [-m debug_mask] device " "[type value]\n", prog); fprintf(stderr, "\tList all tags for a device and exit\n"); exit(1); } int main(int argc, char **argv) { blkid_tag_iterate iter; blkid_cache cache = NULL; blkid_dev dev; int c, ret, found; int flags = BLKID_DEV_FIND; char *tmp; char *file = NULL; char *devname = NULL; char *search_type = NULL; char *search_value = NULL; const char *type, *value; while ((c = getopt (argc, argv, "m:f:")) != EOF) switch (c) { case 'f': file = optarg; break; case 'm': blkid_debug_mask = strtoul (optarg, &tmp, 0); if (*tmp) { fprintf(stderr, "Invalid debug mask: %s\n", optarg); exit(1); } break; case '?': usage(argv[0]); } if (argc > optind) devname = argv[optind++]; if (argc > optind) search_type = argv[optind++]; if (argc > optind) search_value = argv[optind++]; if (!devname || (argc != optind)) usage(argv[0]); if ((ret = blkid_get_cache(&cache, file)) != 0) { fprintf(stderr, 
"%s: error creating cache (%d)\n", argv[0], ret); exit(1); } dev = blkid_get_dev(cache, devname, flags); if (!dev) { fprintf(stderr, "%s: cannot find device in blkid cache\n", devname); exit(1); } if (search_type) { found = blkid_dev_has_tag(dev, search_type, search_value); printf("Device %s: (%s, %s) %s\n", blkid_dev_devname(dev), search_type, search_value ? search_value : "NULL", found ? "FOUND" : "NOT FOUND"); return !found; } printf("Device %s...\n", blkid_dev_devname(dev)); iter = blkid_tag_iterate_begin(dev); while (blkid_tag_next(iter, &type, &value) == 0) { printf("\tTag %s has value %s\n", type, value); } blkid_tag_iterate_end(iter); blkid_put_cache(cache); return 0; } #endif
gpl-2.0
Altaf-Mahdi/grouper
drivers/gpu/ion/tegra/tegra_ion.c
148
13991
/* * drivers/gpu/tegra/tegra_ion.c * * Copyright (C) 2011 Google, Inc. * Copyright (C) 2011, NVIDIA Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ #include <linux/err.h> #include <linux/ion.h> #include <linux/tegra_ion.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/io.h> #include "../ion_priv.h" #define CLIENT_HEAP_MASK 0xFFFFFFFF #define HEAP_FLAGS 0xFF #if !defined(CONFIG_TEGRA_NVMAP) #include "mach/nvmap.h" struct nvmap_device *nvmap_dev; #endif static struct ion_device *idev; static int num_heaps; static struct ion_heap **heaps; static int tegra_ion_pin(struct ion_client *client, unsigned int cmd, unsigned long arg) { struct tegra_ion_pin_data data; int ret; struct ion_handle *on_stack[16]; struct ion_handle **refs = on_stack; int i; bool valid_handle; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; if (data.count) { size_t bytes = data.count * sizeof(struct ion_handle *); if (data.count > ARRAY_SIZE(on_stack)) refs = kmalloc(data.count * sizeof(*refs), GFP_KERNEL); else refs = on_stack; if (!refs) return -ENOMEM; if (copy_from_user(refs, (void *)data.handles, bytes)) { ret = -EFAULT; goto err; } } else return -EINVAL; mutex_lock(&client->lock); for (i = 0; i < data.count; i++) { /* Ignore NULL pointers during unpin operation. 
*/ if (!refs[i] && cmd == TEGRA_ION_UNPIN) continue; valid_handle = ion_handle_validate(client, refs[i]); if (!valid_handle) { WARN(1, "invalid handle passed h=0x%x", (u32)refs[i]); mutex_unlock(&client->lock); ret = -EINVAL; goto err; } } mutex_unlock(&client->lock); if (cmd == TEGRA_ION_PIN) { ion_phys_addr_t addr; size_t len; for (i = 0; i < data.count; i++) { ret = ion_phys(client, refs[i], &addr, &len); if (ret) goto err; ion_handle_get(refs[i]); ret = put_user(addr, &data.addr[i]); if (ret) return ret; } } else if (cmd == TEGRA_ION_UNPIN) { for (i = 0; i < data.count; i++) { if (refs[i]) ion_handle_put(refs[i]); } } err: if (ret) { pr_err("error, ret=0x%x", ret); /* FIXME: undo pinning. */ } if (refs != on_stack) kfree(refs); return ret; } static int tegra_ion_alloc_from_id(struct ion_client *client, unsigned int cmd, unsigned long arg) { struct tegra_ion_id_data data; struct ion_buffer *buffer; struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; buffer = (struct ion_buffer *)data.id; data.handle = ion_import(client, buffer); data.size = buffer->size; if (put_user(data.handle, &user_data->handle)) return -EFAULT; if (put_user(data.size, &user_data->size)) return -EFAULT; return 0; } static int tegra_ion_get_id(struct ion_client *client, unsigned int cmd, unsigned long arg) { bool valid_handle; struct tegra_ion_id_data data; struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, data.handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "invalid handle passed\n"); return -EINVAL; } pr_debug("h=0x%x, b=0x%x, bref=%d", (u32)data.handle, (u32)data.handle->buffer, atomic_read(&data.handle->buffer->ref.refcount)); if (put_user((unsigned long)ion_handle_buffer(data.handle), &user_data->id)) return 
-EFAULT; return 0; } static int tegra_ion_cache_maint(struct ion_client *client, unsigned int cmd, unsigned long arg) { wmb(); return 0; } static int tegra_ion_rw(struct ion_client *client, unsigned int cmd, unsigned long arg) { bool valid_handle; struct tegra_ion_rw_data data; char *kern_addr, *src; int ret = 0; size_t copied = 0; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; if (!data.handle || !data.addr || !data.count || !data.elem_size) return -EINVAL; mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, data.handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "%s: invalid handle passed to get id.\n", __func__); return -EINVAL; } if (data.elem_size == data.mem_stride && data.elem_size == data.user_stride) { data.elem_size *= data.count; data.mem_stride = data.elem_size; data.user_stride = data.elem_size; data.count = 1; } kern_addr = ion_map_kernel(client, data.handle); while (data.count--) { if (data.offset + data.elem_size > data.handle->buffer->size) { WARN(1, "read/write outside of handle\n"); ret = -EFAULT; break; } src = kern_addr + data.offset; if (cmd == TEGRA_ION_READ) ret = copy_to_user((void *)data.addr, src, data.elem_size); else ret = copy_from_user(src, (void *)data.addr, data.elem_size); if (ret) break; copied += data.elem_size; data.addr += data.user_stride; data.offset += data.mem_stride; } ion_unmap_kernel(client, data.handle); return ret; } static int tegra_ion_get_param(struct ion_client *client, unsigned int cmd, unsigned long arg) { bool valid_handle; struct tegra_ion_get_params_data data; struct tegra_ion_get_params_data *user_data = (struct tegra_ion_get_params_data *)arg; struct ion_buffer *buffer; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, data.handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "%s: invalid handle passed to get id.\n", __func__); return 
-EINVAL; } buffer = ion_handle_buffer(data.handle); data.align = 4096; data.heap = 1; ion_phys(client, data.handle, &data.addr, &data.size); if (copy_to_user(user_data, &data, sizeof(data))) return -EFAULT; return 0; } static long tegra_ion_ioctl(struct ion_client *client, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; switch (cmd) { case TEGRA_ION_ALLOC_FROM_ID: ret = tegra_ion_alloc_from_id(client, cmd, arg); break; case TEGRA_ION_GET_ID: ret = tegra_ion_get_id(client, cmd, arg); break; case TEGRA_ION_PIN: case TEGRA_ION_UNPIN: ret = tegra_ion_pin(client, cmd, arg); break; case TEGRA_ION_CACHE_MAINT: ret = tegra_ion_cache_maint(client, cmd, arg); break; case TEGRA_ION_READ: case TEGRA_ION_WRITE: ret = tegra_ion_rw(client, cmd, arg); break; case TEGRA_ION_GET_PARAM: ret = tegra_ion_get_param(client, cmd, arg); break; default: WARN(1, "Unknown custom ioctl\n"); return -ENOTTY; } return ret; } int tegra_ion_probe(struct platform_device *pdev) { struct ion_platform_data *pdata = pdev->dev.platform_data; int i; num_heaps = pdata->nr; heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); idev = ion_device_create(tegra_ion_ioctl); if (IS_ERR_OR_NULL(idev)) { kfree(heaps); return PTR_ERR(idev); } /* create the heaps as specified in the board file */ for (i = 0; i < num_heaps; i++) { struct ion_platform_heap *heap_data = &pdata->heaps[i]; heaps[i] = ion_heap_create(heap_data); if (IS_ERR_OR_NULL(heaps[i])) { pr_warn("%s(type:%d id:%d) isn't supported\n", heap_data->name, heap_data->type, heap_data->id); continue; } ion_device_add_heap(idev, heaps[i]); } platform_set_drvdata(pdev, idev); #if !defined(CONFIG_TEGRA_NVMAP) nvmap_dev = (struct nvmap_device *)idev; #endif return 0; } int tegra_ion_remove(struct platform_device *pdev) { struct ion_device *idev = platform_get_drvdata(pdev); int i; ion_device_destroy(idev); for (i = 0; i < num_heaps; i++) ion_heap_destroy(heaps[i]); kfree(heaps); return 0; } static struct platform_driver ion_driver = { 
.probe = tegra_ion_probe, .remove = tegra_ion_remove, .driver = { .name = "ion-tegra" } }; static int __init ion_init(void) { return platform_driver_register(&ion_driver); } static void __exit ion_exit(void) { platform_driver_unregister(&ion_driver); } fs_initcall(ion_init); module_exit(ion_exit); #if !defined(CONFIG_TEGRA_NVMAP) struct nvmap_client *nvmap_create_client(struct nvmap_device *dev, const char *name) { return ion_client_create(dev, CLIENT_HEAP_MASK, name); } struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size, size_t align, unsigned int flags, unsigned int heap_mask) { return ion_alloc(client, size, align, HEAP_FLAGS); } void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r) { ion_free(client, r); } void *nvmap_mmap(struct nvmap_handle_ref *r) { return ion_map_kernel(r->client, r); } void nvmap_munmap(struct nvmap_handle_ref *r, void *addr) { ion_unmap_kernel(r->client, r); } struct nvmap_client *nvmap_client_get_file(int fd) { return ion_client_get_file(fd); } struct nvmap_client *nvmap_client_get(struct nvmap_client *client) { ion_client_get(client); return client; } void nvmap_client_put(struct nvmap_client *c) { ion_client_put(c); } phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r) { ion_phys_addr_t addr; size_t len; ion_handle_get(r); ion_phys(c, r, &addr, &len); wmb(); return addr; } phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id) { struct ion_handle *handle; ion_phys_addr_t addr; size_t len; handle = nvmap_convert_handle_u2k(id); ion_phys(c, handle, &addr, &len); return addr; } void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r) { if (r) ion_handle_put(r); } static int nvmap_reloc_pin_array(struct ion_client *client, const struct nvmap_pinarray_elem *arr, int nr, struct ion_handle *gather) { struct ion_handle *last_patch = NULL; void *patch_addr; ion_phys_addr_t pin_addr; size_t len; int i; for (i = 0; i < nr; i++) { struct 
ion_handle *patch; struct ion_handle *pin; ion_phys_addr_t reloc_addr; /* all of the handles are validated and get'ted prior to * calling this function, so casting is safe here */ pin = (struct ion_handle *)arr[i].pin_mem; if (arr[i].patch_mem == (unsigned long)last_patch) { patch = last_patch; } else if (arr[i].patch_mem == (unsigned long)gather) { patch = gather; } else { if (last_patch) ion_handle_put(last_patch); ion_handle_get((struct ion_handle *)arr[i].patch_mem); patch = (struct ion_handle *)arr[i].patch_mem; if (!patch) return -EPERM; last_patch = patch; } patch_addr = ion_map_kernel(client, patch); patch_addr = patch_addr + arr[i].patch_offset; ion_phys(client, pin, &pin_addr, &len); reloc_addr = pin_addr + arr[i].pin_offset; __raw_writel(reloc_addr, patch_addr); ion_unmap_kernel(client, patch); } if (last_patch) ion_handle_put(last_patch); wmb(); return 0; } int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather, const struct nvmap_pinarray_elem *arr, int nr, struct nvmap_handle **unique) { int i; int count = 0; /* FIXME: take care of duplicate ones & validation. 
*/ for (i = 0; i < nr; i++) { unique[i] = (struct nvmap_handle *)arr[i].pin_mem; nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]); count++; } nvmap_reloc_pin_array((struct ion_client *)client, arr, nr, (struct ion_handle *)gather); return nr; } void nvmap_unpin_handles(struct nvmap_client *client, struct nvmap_handle **h, int nr) { int i; for (i = 0; i < nr; i++) nvmap_unpin(client, h[i]); } int nvmap_patch_word(struct nvmap_client *client, struct nvmap_handle *patch, u32 patch_offset, u32 patch_value) { void *vaddr; u32 *patch_addr; vaddr = ion_map_kernel(client, patch); patch_addr = vaddr + patch_offset; __raw_writel(patch_value, patch_addr); wmb(); ion_unmap_kernel(client, patch); return 0; } struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h); struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client, unsigned long id) { struct ion_handle *handle; handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); pr_debug("id=0x%x, h=0x%x,c=0x%x", (u32)id, (u32)handle, (u32)client); nvmap_handle_get(handle); return handle; } struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client, unsigned long id) { struct ion_buffer *buffer; struct ion_handle *handle; struct ion_client *ion_client = client; handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); pr_debug("id=0x%x, h=0x%x,c=0x%x", (u32)id, (u32)handle, (u32)client); buffer = handle->buffer; handle = ion_handle_create(client, buffer); mutex_lock(&ion_client->lock); ion_handle_add(ion_client, handle); mutex_unlock(&ion_client->lock); pr_debug("dup id=0x%x, h=0x%x", (u32)id, (u32)handle); return handle; } void _nvmap_handle_free(struct nvmap_handle *h) { ion_handle_put(h); } struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client, size_t size, size_t align, unsigned int flags, unsigned int iova_start) { struct ion_handle *h; h = ion_alloc(client, size, align, 0xFF); ion_remap_dma(client, h, iova_start); return h; } void nvmap_free_iovm(struct 
nvmap_client *client, struct nvmap_handle_ref *r) { ion_free(client, r); } struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h) { ion_handle_get(h); return h; } void nvmap_handle_put(struct nvmap_handle *h) { ion_handle_put(h); } #endif
gpl-2.0
perkarom/Shark-E
drivers/misc/modem_if/sipc4_io_device.c
148
41747
/* /linux/drivers/misc/modem_if/modem_io_device.c * * Copyright (C) 2010 Google, Inc. * Copyright (C) 2010 Samsung Electronics. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/device.h> #include <linux/platform_data/modem.h> #ifdef CONFIG_LINK_DEVICE_C2C #include <linux/platform_data/c2c.h> #endif #include "modem_prj.h" #include "modem_utils.h" /* * MAX_RXDATA_SIZE is used at making skb, when it called with page size * it need more bytes to allocate itself (Ex, cache byte, shared info, * padding...) * So, give restriction to allocation size below 1 page to prevent * big pages broken. 
*/ #define MAX_RXDATA_SIZE 0x0E00 /* 4 * 1024 - 512 */ #define MAX_MULTI_FMT_SIZE 0x4000 /* 16 * 1024 */ static const char hdlc_start[1] = { HDLC_START }; static const char hdlc_end[1] = { HDLC_END }; static int rx_iodev_skb(struct sk_buff *skb); static ssize_t show_waketime(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int msec; char *p = buf; struct miscdevice *miscdev = dev_get_drvdata(dev); struct io_device *iod = container_of(miscdev, struct io_device, miscdev); msec = jiffies_to_msecs(iod->waketime); p += sprintf(buf, "raw waketime : %ums\n", msec); return p - buf; } static ssize_t store_waketime(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long msec; int ret; struct miscdevice *miscdev = dev_get_drvdata(dev); struct io_device *iod = container_of(miscdev, struct io_device, miscdev); ret = strict_strtoul(buf, 10, &msec); if (ret) return count; iod->waketime = msecs_to_jiffies(msec); return count; } static struct device_attribute attr_waketime = __ATTR(waketime, S_IRUGO | S_IWUSR, show_waketime, store_waketime); static ssize_t show_loopback(struct device *dev, struct device_attribute *attr, char *buf) { struct miscdevice *miscdev = dev_get_drvdata(dev); struct modem_shared *msd = container_of(miscdev, struct io_device, miscdev)->msd; unsigned char *ip = (unsigned char *)&msd->loopback_ipaddr; char *p = buf; p += sprintf(buf, "%u.%u.%u.%u\n", ip[0], ip[1], ip[2], ip[3]); return p - buf; } static ssize_t store_loopback(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct miscdevice *miscdev = dev_get_drvdata(dev); struct modem_shared *msd = container_of(miscdev, struct io_device, miscdev)->msd; msd->loopback_ipaddr = ipv4str_to_be32(buf, count); return count; } static struct device_attribute attr_loopback = __ATTR(loopback, S_IRUGO | S_IWUSR, show_loopback, store_loopback); static int get_header_size(struct io_device *iod) { switch (iod->format) { case 
IPC_FMT: return sizeof(struct fmt_hdr); case IPC_RAW: case IPC_MULTI_RAW: return sizeof(struct raw_hdr); case IPC_RFS: return sizeof(struct rfs_hdr); case IPC_BOOT: /* minimum size for transaction align */ return 4; case IPC_RAMDUMP: default: return 0; } } static int get_hdlc_size(struct io_device *iod, char *buf) { struct fmt_hdr *fmt_header; struct raw_hdr *raw_header; struct rfs_hdr *rfs_header; mif_debug("buf : %02x %02x %02x (%d)\n", *buf, *(buf + 1), *(buf + 2), __LINE__); switch (iod->format) { case IPC_FMT: fmt_header = (struct fmt_hdr *)buf; if (iod->mc->mdm_data->ipc_version == SIPC_VER_42) return fmt_header->len & 0x3FFF; else return fmt_header->len; case IPC_RAW: case IPC_MULTI_RAW: raw_header = (struct raw_hdr *)buf; return raw_header->len; case IPC_RFS: rfs_header = (struct rfs_hdr *)buf; return rfs_header->len; default: break; } return 0; } static void *get_header(struct io_device *iod, size_t count, char *frame_header_buf) { struct fmt_hdr *fmt_h; struct raw_hdr *raw_h; struct rfs_hdr *rfs_h; switch (iod->format) { case IPC_FMT: fmt_h = (struct fmt_hdr *)frame_header_buf; fmt_h->len = count + sizeof(struct fmt_hdr); fmt_h->control = 0; return (void *)frame_header_buf; case IPC_RAW: case IPC_MULTI_RAW: raw_h = (struct raw_hdr *)frame_header_buf; raw_h->len = count + sizeof(struct raw_hdr); raw_h->channel = iod->id & 0x1F; raw_h->control = 0; return (void *)frame_header_buf; case IPC_RFS: rfs_h = (struct rfs_hdr *)frame_header_buf; rfs_h->len = count + sizeof(struct raw_hdr); rfs_h->id = iod->id; return (void *)frame_header_buf; default: return 0; } } static inline int calc_padding_size(struct io_device *iod, struct link_device *ld, unsigned len) { if (ld->aligned) return (4 - (len & 0x3)) & 0x3; else return 0; } static inline int rx_hdlc_head_start_check(char *buf) { /* check hdlc head and return size of start byte */ return (buf[0] == HDLC_START) ? 
SIZE_OF_HDLC_START : -EBADMSG; } static inline int rx_hdlc_tail_check(char *buf) { /* check hdlc tail and return size of tail byte */ return (buf[0] == HDLC_END) ? SIZE_OF_HDLC_END : -EBADMSG; } /* remove hdlc header and store IPC header */ static int rx_hdlc_head_check(struct io_device *iod, struct link_device *ld, char *buf, unsigned rest) { struct header_data *hdr = &fragdata(iod, ld)->h_data; int head_size = get_header_size(iod); int done_len = 0; int len = 0; /* first frame, remove start header 7F */ if (!hdr->start) { len = rx_hdlc_head_start_check(buf); if (len < 0) { mif_err("Wrong HDLC start: 0x%x\n", *buf); return len; /*Wrong hdlc start*/ } mif_debug("check len : %d, rest : %d (%d)\n", len, rest, __LINE__); /* set the start flag of current packet */ hdr->start = HDLC_START; hdr->len = 0; /* debug print */ switch (iod->format) { case IPC_FMT: case IPC_RAW: case IPC_MULTI_RAW: case IPC_RFS: /* TODO: print buf... */ break; case IPC_CMD: case IPC_BOOT: case IPC_RAMDUMP: default: break; } buf += len; done_len += len; rest -= len; /* rest, call by value */ } mif_debug("check len : %d, rest : %d (%d)\n", len, rest, __LINE__); /* store the HDLC header to iod priv */ if (hdr->len < head_size) { len = min(rest, head_size - hdr->len); memcpy(hdr->hdr + hdr->len, buf, len); hdr->len += len; done_len += len; } mif_debug("check done_len : %d, rest : %d (%d)\n", done_len, rest, __LINE__); return done_len; } /* alloc skb and copy data to skb */ static int rx_hdlc_data_check(struct io_device *iod, struct link_device *ld, char *buf, unsigned rest) { struct header_data *hdr = &fragdata(iod, ld)->h_data; struct sk_buff *skb = fragdata(iod, ld)->skb_recv; int head_size = get_header_size(iod); int data_size = get_hdlc_size(iod, hdr->hdr) - head_size; int alloc_size; int len = 0; int done_len = 0; int rest_len = data_size - hdr->frag_len; int continue_len = fragdata(iod, ld)->realloc_offset; mif_debug("head_size : %d, data_size : %d (%d)\n", head_size, data_size, __LINE__); if 
(continue_len) { /* check the HDLC header*/ if (rx_hdlc_head_start_check(buf) == SIZE_OF_HDLC_START) { rest_len -= (head_size + SIZE_OF_HDLC_START); continue_len += (head_size + SIZE_OF_HDLC_START); } buf += continue_len; rest -= continue_len; done_len += continue_len; fragdata(iod, ld)->realloc_offset = 0; mif_debug("realloc_offset = %d\n", continue_len); } /* first payload data - alloc skb */ if (!skb) { /* make skb data size under MAX_RXDATA_SIZE */ alloc_size = min(data_size, MAX_RXDATA_SIZE); alloc_size = min(alloc_size, rest_len); /* exceptional case for RFS channel * make skb for header info first */ if (iod->format == IPC_RFS && !hdr->frag_len) { skb = rx_alloc_skb(head_size, iod, ld); if (unlikely(!skb)) return -ENOMEM; memcpy(skb_put(skb, head_size), hdr->hdr, head_size); rx_iodev_skb(skb); } /* allocate first packet for data, when its size exceed * MAX_RXDATA_SIZE, this packet will split to * multiple packets */ skb = rx_alloc_skb(alloc_size, iod, ld); if (unlikely(!skb)) { fragdata(iod, ld)->realloc_offset = continue_len; return -ENOMEM; } fragdata(iod, ld)->skb_recv = skb; } while (rest) { /* copy length cannot exceed rest_len */ len = min_t(int, rest_len, rest); /* copy length should be under skb tailroom size */ len = min(len, skb_tailroom(skb)); /* when skb tailroom is bigger than MAX_RXDATA_SIZE * restrict its size to MAX_RXDATA_SIZE just for convinience */ len = min(len, MAX_RXDATA_SIZE); /* copy bytes to skb */ memcpy(skb_put(skb, len), buf, len); /* adjusting variables */ buf += len; rest -= len; done_len += len; rest_len -= len; hdr->frag_len += len; /* check if it is final for this packet sequence */ if (!rest_len || !rest) break; /* more bytes are remain for this packet sequence * pass fully loaded skb to rx queue * and allocate another skb for continues data recv chain */ rx_iodev_skb(skb); fragdata(iod, ld)->skb_recv = NULL; alloc_size = min(rest_len, MAX_RXDATA_SIZE); skb = rx_alloc_skb(alloc_size, iod, ld); if (unlikely(!skb)) { 
fragdata(iod, ld)->realloc_offset = done_len; return -ENOMEM; } fragdata(iod, ld)->skb_recv = skb; } mif_debug("rest : %d, alloc_size : %d , len : %d (%d)\n", rest, alloc_size, skb->len, __LINE__); return done_len; } static int rx_multi_fmt_frame(struct sk_buff *rx_skb) { struct io_device *iod = skbpriv(rx_skb)->iod; struct link_device *ld = skbpriv(rx_skb)->ld; struct fmt_hdr *fh = (struct fmt_hdr *)fragdata(iod, ld)->h_data.hdr; unsigned int id = fh->control & 0x7F; struct sk_buff *skb = iod->skb[id]; unsigned char *data = fragdata(iod, ld)->skb_recv->data; unsigned int rcvd = fragdata(iod, ld)->skb_recv->len; if (!skb) { /* If there has been no multiple frame with this ID */ if (!(fh->control & 0x80)) { /* It is a single frame because the "more" bit is 0. */ #if 0 mif_err("\n<%s> Rx FMT frame (len %d)\n", iod->name, rcvd); print_sipc4_fmt_frame(data); mif_err("\n"); #endif skb_queue_tail(&iod->sk_rx_q, fragdata(iod, ld)->skb_recv); mif_debug("wake up wq of %s\n", iod->name); wake_up(&iod->wq); return 0; } else { struct fmt_hdr *fh = NULL; skb = rx_alloc_skb(MAX_MULTI_FMT_SIZE, iod, ld); if (!skb) { mif_err("<%d> alloc_skb fail\n", __LINE__); return -ENOMEM; } iod->skb[id] = skb; fh = (struct fmt_hdr *)data; mif_info("Start multi-frame (ID %d, len %d)", id, fh->len); } } /* Start multi-frame processing */ memcpy(skb_put(skb, rcvd), data, rcvd); dev_kfree_skb_any(fragdata(iod, ld)->skb_recv); if (fh->control & 0x80) { /* The last frame has not arrived yet. */ mif_info("Receiving (ID %d, %d bytes)\n", id, skb->len); } else { /* It is the last frame because the "more" bit is 0. 
*/ mif_info("The Last (ID %d, %d bytes received)\n", id, skb->len); #if 0 mif_err("\n<%s> Rx FMT frame (len %d)\n", iod->name, skb->len); print_sipc4_fmt_frame(skb->data); mif_err("\n"); #endif skb_queue_tail(&iod->sk_rx_q, skb); iod->skb[id] = NULL; mif_info("wake up wq of %s\n", iod->name); wake_up(&iod->wq); } return 0; } static int rx_multi_fmt_frame_sipc42(struct sk_buff *rx_skb) { struct io_device *iod = skbpriv(rx_skb)->iod; struct link_device *ld = skbpriv(rx_skb)->ld; struct fmt_hdr *fh = (struct fmt_hdr *)fragdata(iod, ld)->h_data.hdr; unsigned int id = fh->control & 0x7F; struct sk_buff *skb = iod->skb[id]; unsigned char *data = fragdata(iod, ld)->skb_recv->data; unsigned int rcvd = fragdata(iod, ld)->skb_recv->len; u8 ch; struct io_device *real_iod = NULL; ch = (fh->len & 0xC000) >> 14; fh->len = fh->len & 0x3FFF; real_iod = ld->fmt_iods[ch]; if (!real_iod) { mif_err("wrong channel %d\n", ch); return -1; } skbpriv(rx_skb)->real_iod = real_iod; if (!skb) { /* If there has been no multiple frame with this ID */ if (!(fh->control & 0x80)) { /* It is a single frame because the "more" bit is 0. */ #if 0 mif_err("\n<%s> Rx FMT frame (len %d)\n", iod->name, rcvd); print_sipc4_fmt_frame(data); mif_err("\n"); #endif skb_queue_tail(&real_iod->sk_rx_q, fragdata(iod, ld)->skb_recv); mif_debug("wake up wq of %s\n", iod->name); wake_up(&real_iod->wq); return 0; } else { struct fmt_hdr *fh = NULL; skb = rx_alloc_skb(MAX_MULTI_FMT_SIZE, real_iod, ld); if (!skb) { mif_err("alloc_skb fail\n"); return -ENOMEM; } real_iod->skb[id] = skb; fh = (struct fmt_hdr *)data; mif_err("Start multi-frame (ID %d, len %d)", id, fh->len); } } /* Start multi-frame processing */ memcpy(skb_put(skb, rcvd), data, rcvd); dev_kfree_skb_any(fragdata(real_iod, ld)->skb_recv); if (fh->control & 0x80) { /* The last frame has not arrived yet. */ mif_err("Receiving (ID %d, %d bytes)\n", id, skb->len); } else { /* It is the last frame because the "more" bit is 0. 
*/ mif_err("The Last (ID %d, %d bytes received)\n", id, skb->len); #if 0 mif_err("\n<%s> Rx FMT frame (len %d)\n", iod->name, skb->len); print_sipc4_fmt_frame(skb->data); mif_err("\n"); #endif skb_queue_tail(&real_iod->sk_rx_q, skb); real_iod->skb[id] = NULL; mif_info("wake up wq of %s\n", real_iod->name); wake_up(&real_iod->wq); } return 0; } static int rx_iodev_skb_raw(struct sk_buff *skb) { int err = 0; struct io_device *iod = skbpriv(skb)->real_iod; struct net_device *ndev = NULL; struct iphdr *ip_header = NULL; struct ethhdr *ehdr = NULL; const char source[ETH_ALEN] = SOURCE_MAC_ADDR; /* check the real_iod is open? */ /* if (atomic_read(&iod->opened) == 0) { mif_err("<%s> is not opened.\n", iod->name); pr_skb("drop packet", skb); return -ENOENT; } */ switch (iod->io_typ) { case IODEV_MISC: mif_debug("<%s> sk_rx_q.qlen = %d\n", iod->name, iod->sk_rx_q.qlen); skb_queue_tail(&iod->sk_rx_q, skb); wake_up(&iod->wq); return 0; case IODEV_NET: ndev = iod->ndev; if (!ndev) { mif_err("<%s> ndev == NULL", iod->name); return -EINVAL; } skb->dev = ndev; ndev->stats.rx_packets++; ndev->stats.rx_bytes += skb->len; /* check the version of IP */ ip_header = (struct iphdr *)skb->data; if (ip_header->version == IP6VERSION) skb->protocol = htons(ETH_P_IPV6); else skb->protocol = htons(ETH_P_IP); if (iod->use_handover) { skb_push(skb, sizeof(struct ethhdr)); ehdr = (void *)skb->data; memcpy(ehdr->h_dest, ndev->dev_addr, ETH_ALEN); memcpy(ehdr->h_source, source, ETH_ALEN); ehdr->h_proto = skb->protocol; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_reset_mac_header(skb); skb_pull(skb, sizeof(struct ethhdr)); } if (in_irq()) err = netif_rx(skb); else err = netif_rx_ni(skb); if (err != NET_RX_SUCCESS) dev_err(&ndev->dev, "rx error: %d\n", err); return err; default: mif_err("wrong io_type : %d\n", iod->io_typ); return -EINVAL; } } static void rx_iodev_work(struct work_struct *work) { int ret = 0; struct sk_buff *skb = NULL; struct io_device *iod = container_of(work, struct io_device, 
rx_work.work); while ((skb = skb_dequeue(&iod->sk_rx_q)) != NULL) { ret = rx_iodev_skb_raw(skb); if (ret < 0) { mif_err("<%s> rx_iodev_skb_raw err = %d", iod->name, ret); dev_kfree_skb_any(skb); } else if (ret == NET_RX_DROP) { mif_err("<%s> ret == NET_RX_DROP\n", iod->name); schedule_delayed_work(&iod->rx_work, msecs_to_jiffies(100)); break; } } } static int rx_multipdp(struct sk_buff *skb) { u8 ch; struct io_device *iod = skbpriv(skb)->iod; struct link_device *ld = skbpriv(skb)->ld; struct raw_hdr *raw_header = (struct raw_hdr *)fragdata(iod, ld)->h_data.hdr; struct io_device *real_iod = NULL; ch = raw_header->channel; if (ch == DATA_LOOPBACK_CHANNEL && ld->msd->loopback_ipaddr) ch = RMNET0_CH_ID; real_iod = link_get_iod_with_channel(ld, 0x20 | ch); if (!real_iod) { mif_err("wrong channel %d\n", ch); return -1; } skbpriv(skb)->real_iod = real_iod; skb_queue_tail(&iod->sk_rx_q, skb); mif_debug("sk_rx_qlen:%d\n", iod->sk_rx_q.qlen); schedule_delayed_work(&iod->rx_work, 0); return 0; } /* de-mux function draft */ static int rx_iodev_skb(struct sk_buff *skb) { struct io_device *iod = skbpriv(skb)->iod; switch (iod->format) { case IPC_MULTI_RAW: return rx_multipdp(skb); case IPC_FMT: if (iod->mc->mdm_data->ipc_version == SIPC_VER_42) return rx_multi_fmt_frame_sipc42(skb); else return rx_multi_fmt_frame(skb); case IPC_RFS: default: skb_queue_tail(&iod->sk_rx_q, skb); mif_debug("wake up wq of %s\n", iod->name); wake_up(&iod->wq); return 0; } } static int rx_hdlc_packet(struct io_device *iod, struct link_device *ld, const char *data, unsigned recv_size) { int rest = (int)recv_size; char *buf = (char *)data; int err = 0; int len = 0; unsigned rcvd = 0; if (rest <= 0) goto exit; mif_debug("RX_SIZE = %d, ld: %s\n", rest, ld->name); if (fragdata(iod, ld)->h_data.frag_len) { /* If the fragdata(iod, ld)->h_data.frag_len field is not zero, there is a HDLC frame that is waiting for more data or HDLC_END in the skb (fragdata(iod, ld)->skb_recv). 
In this case, rx_hdlc_head_check() must be skipped. */ goto data_check; } next_frame: err = len = rx_hdlc_head_check(iod, ld, buf, rest); if (err < 0) goto exit; mif_debug("check len : %d, rest : %d (%d)\n", len, rest, __LINE__); buf += len; rest -= len; if (rest <= 0) goto exit; data_check: /* If the return value of rx_hdlc_data_check() is zero, there remains only HDLC_END that will be received. */ err = len = rx_hdlc_data_check(iod, ld, buf, rest); if (err < 0) goto exit; mif_debug("check len : %d, rest : %d (%d)\n", len, rest, __LINE__); buf += len; rest -= len; if (!rest && fragdata(iod, ld)->h_data.frag_len) { /* Data is being received and more data or HDLC_END does not arrive yet, but there is no more data in the buffer. More data may come within the next frame from the link device. */ return 0; } else if (rest <= 0) goto exit; /* At this point, one HDLC frame except HDLC_END has been received. */ err = len = rx_hdlc_tail_check(buf); if (err < 0) { mif_err("Wrong HDLC end: 0x%02X\n", *buf); goto exit; } mif_debug("check len : %d, rest : %d (%d)\n", len, rest, __LINE__); buf += len; rest -= len; /* At this point, one complete HDLC frame has been received. */ /* The padding size is applied for the next HDLC frame. Zero will be returned by calc_padding_size() if the link device does not require 4-byte aligned access. */ rcvd = get_hdlc_size(iod, fragdata(iod, ld)->h_data.hdr) + (SIZE_OF_HDLC_START + SIZE_OF_HDLC_END); len = calc_padding_size(iod, ld, rcvd); buf += len; rest -= len; if (rest < 0) goto exit; err = rx_iodev_skb(fragdata(iod, ld)->skb_recv); if (err < 0) goto exit; /* initialize header & skb */ fragdata(iod, ld)->skb_recv = NULL; memset(&fragdata(iod, ld)->h_data, 0x00, sizeof(struct header_data)); fragdata(iod, ld)->realloc_offset = 0; if (rest) goto next_frame; exit: /* free buffers. 
mipi-hsi re-use recv buf */ if (rest < 0) err = -ERANGE; if (err == -ENOMEM) { if (!(fragdata(iod, ld)->h_data.frag_len)) memset(&fragdata(iod, ld)->h_data, 0x00, sizeof(struct header_data)); return err; } if (err < 0 && fragdata(iod, ld)->skb_recv) { dev_kfree_skb_any(fragdata(iod, ld)->skb_recv); fragdata(iod, ld)->skb_recv = NULL; /* clear headers */ memset(&fragdata(iod, ld)->h_data, 0x00, sizeof(struct header_data)); fragdata(iod, ld)->realloc_offset = 0; } return err; } static int rx_rfs_packet(struct io_device *iod, struct link_device *ld, const char *data, unsigned size) { int err = 0; int pad = 0; int rcvd = 0; struct sk_buff *skb; if (data[0] != HDLC_START) { mif_err("Dropping RFS packet ... " "size = %d, start = %02X %02X %02X %02X\n", size, data[0], data[1], data[2], data[3]); return -EINVAL; } if (data[size-1] != HDLC_END) { for (pad = 1; pad < 4; pad++) if (data[(size-1)-pad] == HDLC_END) break; if (pad >= 4) { char *b = (char *)data; unsigned sz = size; mif_err("size %d, No END_FLAG!!!\n", size); mif_err("end = %02X %02X %02X %02X\n", b[sz-4], b[sz-3], b[sz-2], b[sz-1]); return -EINVAL; } else { mif_info("padding = %d\n", pad); } } skb = rx_alloc_skb(size, iod, ld); if (unlikely(!skb)) { mif_err("alloc_skb fail\n"); return -ENOMEM; } /* copy the RFS haeder to skb->data */ rcvd = size - sizeof(hdlc_start) - sizeof(hdlc_end) - pad; memcpy(skb_put(skb, rcvd), ((char *)data + sizeof(hdlc_start)), rcvd); fragdata(iod, ld)->skb_recv = skb; err = rx_iodev_skb(fragdata(iod, ld)->skb_recv); return err; } /* called from link device when a packet arrives for this io device */ static int io_dev_recv_data_from_link_dev(struct io_device *iod, struct link_device *ld, const char *data, unsigned int len) { struct sk_buff *skb; int err; unsigned int alloc_size, rest_len; char *cur; /* check the iod(except IODEV_DUMMY) is open? * if the iod is MULTIPDP, check this data on rx_iodev_skb_raw() * because, we cannot know the channel no in here. 
*/ /* if (iod->io_typ != IODEV_DUMMY && atomic_read(&iod->opened) == 0) { mif_err("<%s> is not opened.\n", iod->name); pr_buffer("drop packet", data, len, 16u); return -ENOENT; } */ switch (iod->format) { case IPC_RFS: #ifdef CONFIG_IPC_CMC22x_OLD_RFS err = rx_rfs_packet(iod, ld, data, len); return err; #endif case IPC_FMT: case IPC_RAW: case IPC_MULTI_RAW: if (iod->waketime) wake_lock_timeout(&iod->wakelock, iod->waketime); err = rx_hdlc_packet(iod, ld, data, len); if (err < 0) mif_err("fail process HDLC frame\n"); return err; case IPC_CMD: /* TODO- handle flow control command from CP */ return 0; case IPC_BOOT: case IPC_RAMDUMP: /* save packet to sk_buff */ skb = rx_alloc_skb(len, iod, ld); if (skb) { mif_debug("boot len : %d\n", len); memcpy(skb_put(skb, len), data, len); skb_queue_tail(&iod->sk_rx_q, skb); mif_debug("skb len : %d\n", skb->len); wake_up(&iod->wq); return len; } /* 32KB page alloc fail case, alloc 3.5K a page.. */ mif_info("(%d)page fail, alloc fragment pages\n", len); rest_len = len; cur = (char *)data; while (rest_len) { alloc_size = min_t(unsigned int, MAX_RXDATA_SIZE, rest_len); skb = rx_alloc_skb(alloc_size, iod, ld); if (!skb) { mif_err("fail alloc skb (%d)\n", __LINE__); return -ENOMEM; } mif_debug("boot len : %d\n", alloc_size); memcpy(skb_put(skb, alloc_size), cur, alloc_size); skb_queue_tail(&iod->sk_rx_q, skb); mif_debug("skb len : %d\n", skb->len); rest_len -= alloc_size; cur += alloc_size; } wake_up(&iod->wq); return len; default: return -EINVAL; } } /* inform the IO device that the modem is now online or offline or * crashing or whatever... */ static void io_dev_modem_state_changed(struct io_device *iod, enum modem_state state) { iod->mc->phone_state = state; mif_err("modem state changed. (iod: %s, state: %d)\n", iod->name, state); if ((state == STATE_CRASH_RESET) || (state == STATE_CRASH_EXIT) || (state == STATE_NV_REBUILDING)) wake_up(&iod->wq); } /** * io_dev_sim_state_changed * @iod: IPC's io_device * @sim_online: SIM is online? 
*/ static void io_dev_sim_state_changed(struct io_device *iod, bool sim_online) { if (atomic_read(&iod->opened) == 0) { mif_err("iod is not opened: %s\n", iod->name); } else if (iod->mc->sim_state.online == sim_online) { mif_err("sim state not changed.\n"); } else { iod->mc->sim_state.online = sim_online; iod->mc->sim_state.changed = true; wake_lock_timeout(&iod->mc->bootd->wakelock, iod->mc->bootd->waketime); mif_err("sim state changed. (iod: %s, state: " "[online=%d, changed=%d])\n", iod->name, iod->mc->sim_state.online, iod->mc->sim_state.changed); wake_up(&iod->wq); } } static int misc_open(struct inode *inode, struct file *filp) { struct io_device *iod = to_io_device(filp->private_data); struct modem_shared *msd = iod->msd; struct link_device *ld; int ret; filp->private_data = (void *)iod; mif_err("iod = %s\n", iod->name); atomic_inc(&iod->opened); list_for_each_entry(ld, &msd->link_dev_list, list) { if (IS_CONNECTED(iod, ld) && ld->init_comm) { ret = ld->init_comm(ld, iod); if (ret < 0) { mif_err("%s: init_comm error: %d\n", ld->name, ret); return ret; } } } return 0; } static int misc_release(struct inode *inode, struct file *filp) { struct io_device *iod = (struct io_device *)filp->private_data; struct modem_shared *msd = iod->msd; struct link_device *ld; mif_err("iod = %s\n", iod->name); atomic_dec(&iod->opened); skb_queue_purge(&iod->sk_rx_q); list_for_each_entry(ld, &msd->link_dev_list, list) { if (IS_CONNECTED(iod, ld) && ld->terminate_comm) ld->terminate_comm(ld, iod); } return 0; } static unsigned int misc_poll(struct file *filp, struct poll_table_struct *wait) { struct io_device *iod = (struct io_device *)filp->private_data; poll_wait(filp, &iod->wq, wait); if ((!skb_queue_empty(&iod->sk_rx_q)) && (iod->mc->phone_state != STATE_OFFLINE)) { return POLLIN | POLLRDNORM; } else if ((iod->mc->phone_state == STATE_CRASH_RESET) || (iod->mc->phone_state == STATE_CRASH_EXIT) || (iod->mc->phone_state == STATE_NV_REBUILDING) || #if 
defined(CONFIG_SEC_DUAL_MODEM_MODE) (iod->mc->phone_state == STATE_MODEM_SWITCH) || #endif (iod->mc->sim_state.changed)) { if (iod->format == IPC_RAW) { msleep(20); return 0; } return POLLHUP; } else { return 0; } } static long misc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int p_state; struct io_device *iod = (struct io_device *)filp->private_data; struct link_device *ld = get_current_link(iod); char cpinfo_buf[530] = "CP Crash "; unsigned long size; int ret; char str[TASK_COMM_LEN]; mif_debug("cmd = 0x%x\n", cmd); switch (cmd) { case IOCTL_MODEM_ON: mif_debug("misc_ioctl : IOCTL_MODEM_ON\n"); return iod->mc->ops.modem_on(iod->mc); case IOCTL_MODEM_OFF: mif_debug("misc_ioctl : IOCTL_MODEM_OFF\n"); return iod->mc->ops.modem_off(iod->mc); case IOCTL_MODEM_RESET: mif_debug("misc_ioctl : IOCTL_MODEM_RESET\n"); return iod->mc->ops.modem_reset(iod->mc); case IOCTL_MODEM_BOOT_ON: mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_ON\n"); return iod->mc->ops.modem_boot_on(iod->mc); case IOCTL_MODEM_BOOT_OFF: mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_OFF\n"); return iod->mc->ops.modem_boot_off(iod->mc); /* TODO - will remove this command after ril updated */ case IOCTL_MODEM_BOOT_DONE: mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_DONE\n"); return 0; case IOCTL_MODEM_STATUS: mif_debug("misc_ioctl : IOCTL_MODEM_STATUS\n"); p_state = iod->mc->phone_state; if ((p_state == STATE_CRASH_RESET) || (p_state == STATE_CRASH_EXIT)) { mif_err("<%s> send err state : %d\n", iod->name, p_state); } else if (iod->mc->sim_state.changed && !strcmp(get_task_comm(str, get_current()), "rild")) { int s_state = iod->mc->sim_state.online ? 
STATE_SIM_ATTACH : STATE_SIM_DETACH; iod->mc->sim_state.changed = false; mif_info("SIM states (%d) to %s\n", s_state, str); return s_state; } else if (p_state == STATE_NV_REBUILDING) { mif_info("send nv rebuild state : %d\n", p_state); iod->mc->phone_state = STATE_ONLINE; } return p_state; case IOCTL_MODEM_PROTOCOL_SUSPEND: mif_info("misc_ioctl : IOCTL_MODEM_PROTOCOL_SUSPEND\n"); if (iod->format != IPC_MULTI_RAW) return -EINVAL; iodevs_for_each(iod->msd, iodev_netif_stop, 0); return 0; case IOCTL_MODEM_PROTOCOL_RESUME: mif_info("misc_ioctl : IOCTL_MODEM_PROTOCOL_RESUME\n"); if (iod->format != IPC_MULTI_RAW) return -EINVAL; iodevs_for_each(iod->msd, iodev_netif_wake, 0); return 0; case IOCTL_MODEM_DUMP_START: mif_err("misc_ioctl : IOCTL_MODEM_DUMP_START\n"); return ld->dump_start(ld, iod); case IOCTL_MODEM_DUMP_UPDATE: mif_debug("misc_ioctl : IOCTL_MODEM_DUMP_UPDATE\n"); return ld->dump_update(ld, iod, arg); case IOCTL_MODEM_FORCE_CRASH_EXIT: mif_debug("misc_ioctl : IOCTL_MODEM_FORCE_CRASH_EXIT\n"); if (iod->mc->ops.modem_force_crash_exit) return iod->mc->ops.modem_force_crash_exit(iod->mc); return -EINVAL; case IOCTL_MODEM_CP_UPLOAD: mif_err("misc_ioctl : IOCTL_MODEM_CP_UPLOAD\n"); if (copy_from_user(cpinfo_buf + strlen(cpinfo_buf), (void __user *)arg, MAX_CPINFO_SIZE) != 0) panic("CP Crash"); else panic(cpinfo_buf); return 0; case IOCTL_MODEM_DUMP_RESET: mif_err("misc_ioctl : IOCTL_MODEM_DUMP_RESET\n"); return iod->mc->ops.modem_dump_reset(iod->mc); #if defined(CONFIG_SEC_DUAL_MODEM_MODE) case IOCTL_MODEM_SWITCH_MODEM: mif_err("misc_ioctl : IOCTL_MODEM_SWITCH_MODEM\n"); iod->mc->phone_state = STATE_MODEM_SWITCH; wake_up(&iod->wq); return 0; #endif case IOCTL_MIF_LOG_DUMP: size = MAX_MIF_BUFF_SIZE; ret = copy_to_user((void __user *)arg, &size, sizeof(unsigned long)); if (ret < 0) return -EFAULT; mif_dump_log(iod->mc->msd, iod); return 0; case IOCTL_MIF_DPRAM_DUMP: #ifdef CONFIG_LINK_DEVICE_DPRAM if (iod->mc->mdm_data->link_types & LINKTYPE(LINKDEV_DPRAM)) { size = 
iod->mc->mdm_data->dpram_ctl->dp_size; ret = copy_to_user((void __user *)arg, &size, sizeof(unsigned long)); if (ret < 0) return -EFAULT; mif_dump_dpram(iod); return 0; } #endif return -EINVAL; default: /* If you need to handle the ioctl for specific link device, * then assign the link ioctl handler to ld->ioctl * It will be call for specific link ioctl */ if (ld->ioctl) return ld->ioctl(ld, iod, cmd, arg); mif_err("misc_ioctl : ioctl 0x%X is not defined.\n", cmd); return -EINVAL; } return 0; } static ssize_t misc_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct io_device *iod = (struct io_device *)filp->private_data; struct link_device *ld = get_current_link(iod); int frame_len = 0; char frame_header_buf[sizeof(struct raw_hdr)]; struct sk_buff *skb; int err; size_t tx_size; /* TODO - check here flow control for only raw data */ frame_len = SIZE_OF_HDLC_START + get_header_size(iod) + count + SIZE_OF_HDLC_END; if (ld->aligned) frame_len += MAX_LINK_PADDING_SIZE; skb = alloc_skb(frame_len, GFP_KERNEL); if (!skb) { mif_err("fail alloc skb (%d)\n", __LINE__); return -ENOMEM; } switch (iod->format) { case IPC_BOOT: case IPC_RAMDUMP: if (copy_from_user(skb_put(skb, count), buf, count) != 0) { dev_kfree_skb_any(skb); return -EFAULT; } break; case IPC_RFS: memcpy(skb_put(skb, SIZE_OF_HDLC_START), hdlc_start, SIZE_OF_HDLC_START); if (copy_from_user(skb_put(skb, count), buf, count) != 0) { dev_kfree_skb_any(skb); return -EFAULT; } memcpy(skb_put(skb, SIZE_OF_HDLC_END), hdlc_end, SIZE_OF_HDLC_END); break; default: memcpy(skb_put(skb, SIZE_OF_HDLC_START), hdlc_start, SIZE_OF_HDLC_START); memcpy(skb_put(skb, get_header_size(iod)), get_header(iod, count, frame_header_buf), get_header_size(iod)); if (copy_from_user(skb_put(skb, count), buf, count) != 0) { dev_kfree_skb_any(skb); return -EFAULT; } memcpy(skb_put(skb, SIZE_OF_HDLC_END), hdlc_end, SIZE_OF_HDLC_END); break; } skb_put(skb, calc_padding_size(iod, ld, skb->len)); #if 0 if 
(iod->format == IPC_FMT) { mif_err("\n<%s> Tx HDLC FMT frame (len %d)\n", iod->name, skb->len); print_sipc4_hdlc_fmt_frame(skb->data); mif_err("\n"); } #endif #if 0 if (iod->format == IPC_RAW) { mif_err("\n<%s> Tx HDLC RAW frame (len %d)\n", iod->name, skb->len); mif_print_data(skb->data, (skb->len < 64 ? skb->len : 64)); mif_err("\n"); } #endif #if 0 if (iod->format == IPC_RFS) { mif_err("\n<%s> Tx HDLC RFS frame (len %d)\n", iod->name, skb->len); mif_print_data(skb->data, (skb->len < 64 ? skb->len : 64)); mif_err("\n"); } #endif /* send data with sk_buff, link device will put sk_buff * into the specific sk_buff_q and run work-q to send data */ tx_size = skb->len; skbpriv(skb)->iod = iod; skbpriv(skb)->ld = ld; err = ld->send(ld, iod, skb); if (err < 0) { dev_kfree_skb_any(skb); return err; } if (err != tx_size) mif_err("WARNNING: wrong tx size: %s, format=%d " "count=%d, tx_size=%d, return_size=%d", iod->name, iod->format, count, tx_size, err); /* Temporaly enable t he RFS log for debugging IPC RX pedding issue */ if (iod->format == IPC_RFS) mif_info("write rfs size = %d\n", count); return count; } static ssize_t misc_read(struct file *filp, char *buf, size_t count, loff_t *f_pos) { struct io_device *iod = (struct io_device *)filp->private_data; struct sk_buff *skb = NULL; int pktsize = 0; unsigned int rest_len, copy_len; char *cur = buf; skb = skb_dequeue(&iod->sk_rx_q); if (!skb) { mif_err("<%s> no data from sk_rx_q\n", iod->name); return 0; } mif_debug("<%s> skb->len : %d\n", iod->name, skb->len); if (iod->format == IPC_BOOT) { pktsize = rest_len = count; while (rest_len) { if (skb->len > rest_len) { /* BOOT device receviced rx data as serial stream, return data by User requested size */ mif_err("skb->len %d > count %d\n", skb->len, rest_len); pr_skb("BOOT-wRX", skb); if (copy_to_user(cur, skb->data, rest_len) != 0) { dev_kfree_skb_any(skb); return -EFAULT; } cur += rest_len; skb_pull(skb, rest_len); if (skb->len) { mif_info("queue-head, skb->len = %d\n", 
skb->len); skb_queue_head(&iod->sk_rx_q, skb); } mif_debug("return %u\n", rest_len); return rest_len; } copy_len = min(rest_len, skb->len); if (copy_to_user(cur, skb->data, copy_len) != 0) { dev_kfree_skb_any(skb); return -EFAULT; } cur += skb->len; dev_kfree_skb_any(skb); rest_len -= copy_len; if (!rest_len) break; skb = skb_dequeue(&iod->sk_rx_q); if (!skb) { mif_err("<%s> %d / %d sk_rx_q\n", iod->name, (count - rest_len), count); return count - rest_len; } } } else { if (skb->len > count) { mif_err("<%s> skb->len %d > count %d\n", iod->name, skb->len, count); dev_kfree_skb_any(skb); return -EFAULT; } pktsize = skb->len; if (copy_to_user(buf, skb->data, pktsize) != 0) { dev_kfree_skb_any(skb); return -EFAULT; } if (iod->format == IPC_FMT) mif_debug("copied %d bytes to user\n", pktsize); dev_kfree_skb_any(skb); } return pktsize; } #ifdef CONFIG_LINK_DEVICE_C2C static int misc_mmap(struct file *filp, struct vm_area_struct *vma) { int r = 0; unsigned long size = 0; unsigned long pfn = 0; unsigned long offset = 0; struct io_device *iod = (struct io_device *)filp->private_data; if (!vma) return -EFAULT; size = vma->vm_end - vma->vm_start; offset = vma->vm_pgoff << PAGE_SHIFT; if (offset + size > (C2C_CP_RGN_SIZE + C2C_SH_RGN_SIZE)) { mif_err("offset + size > C2C_CP_RGN_SIZE\n"); return -EINVAL; } /* Set the noncacheable property to the region */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_flags |= VM_RESERVED | VM_IO; pfn = __phys_to_pfn(C2C_CP_RGN_ADDR + offset); r = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot); if (r) { mif_err("Failed in remap_pfn_range()!!!\n"); return -EAGAIN; } mif_err("VA = 0x%08lx, offset = 0x%lx, size = %lu\n", vma->vm_start, offset, size); return 0; } #endif static const struct file_operations misc_io_fops = { .owner = THIS_MODULE, .open = misc_open, .release = misc_release, .poll = misc_poll, .unlocked_ioctl = misc_ioctl, .write = misc_write, .read = misc_read, #ifdef CONFIG_LINK_DEVICE_C2C .mmap = 
misc_mmap, #endif }; static int vnet_open(struct net_device *ndev) { struct vnet *vnet = netdev_priv(ndev); netif_start_queue(ndev); atomic_inc(&vnet->iod->opened); return 0; } static int vnet_stop(struct net_device *ndev) { struct vnet *vnet = netdev_priv(ndev); atomic_dec(&vnet->iod->opened); netif_stop_queue(ndev); return 0; } static int vnet_xmit(struct sk_buff *skb, struct net_device *ndev) { int ret = 0; int headroom = 0; int tailroom = 0; struct sk_buff *skb_new = NULL; struct vnet *vnet = netdev_priv(ndev); struct io_device *iod = vnet->iod; struct link_device *ld = get_current_link(iod); struct raw_hdr hd; struct iphdr *ip_header = NULL; /* When use `handover' with Network Bridge, * user -> TCP/IP(kernel) -> bridge device -> TCP/IP(kernel) -> this. * * We remove the one ethernet header of skb before using skb->len, * because the skb has two ethernet headers. */ if (iod->use_handover) { if (iod->id >= PSD_DATA_CHID_BEGIN && iod->id <= PSD_DATA_CHID_END) skb_pull(skb, sizeof(struct ethhdr)); } /* ip loop-back */ ip_header = (struct iphdr *)skb->data; if (iod->msd->loopback_ipaddr && ip_header->daddr == iod->msd->loopback_ipaddr) { swap(ip_header->saddr, ip_header->daddr); hd.channel = DATA_LOOPBACK_CHANNEL; } else { hd.channel = iod->id & 0x1F; } hd.len = skb->len + sizeof(hd); hd.control = 0; headroom = sizeof(hd) + sizeof(hdlc_start); tailroom = sizeof(hdlc_end); if (ld->aligned) tailroom += MAX_LINK_PADDING_SIZE; if (skb_headroom(skb) < headroom || skb_tailroom(skb) < tailroom) { skb_new = skb_copy_expand(skb, headroom, tailroom, GFP_ATOMIC); /* skb_copy_expand success or not, free old skb from caller */ dev_kfree_skb_any(skb); if (!skb_new) return -ENOMEM; } else skb_new = skb; memcpy(skb_push(skb_new, sizeof(hd)), &hd, sizeof(hd)); memcpy(skb_push(skb_new, sizeof(hdlc_start)), hdlc_start, sizeof(hdlc_start)); memcpy(skb_put(skb_new, sizeof(hdlc_end)), hdlc_end, sizeof(hdlc_end)); skb_put(skb_new, calc_padding_size(iod, ld, skb_new->len)); 
skbpriv(skb_new)->iod = iod; skbpriv(skb_new)->ld = ld; ret = ld->send(ld, iod, skb_new); if (ret < 0) { netif_stop_queue(ndev); dev_kfree_skb_any(skb_new); return NETDEV_TX_BUSY; } ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; return NETDEV_TX_OK; } static struct net_device_ops vnet_ops = { .ndo_open = vnet_open, .ndo_stop = vnet_stop, .ndo_start_xmit = vnet_xmit, }; static void vnet_setup(struct net_device *ndev) { ndev->netdev_ops = &vnet_ops; ndev->type = ARPHRD_PPP; ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; ndev->addr_len = 0; ndev->hard_header_len = 0; ndev->tx_queue_len = 1000; ndev->mtu = ETH_DATA_LEN; ndev->watchdog_timeo = 5 * HZ; } static void vnet_setup_ether(struct net_device *ndev) { ndev->netdev_ops = &vnet_ops; ndev->type = ARPHRD_ETHER; ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST | IFF_SLAVE; ndev->addr_len = ETH_ALEN; random_ether_addr(ndev->dev_addr); ndev->hard_header_len = 0; ndev->tx_queue_len = 1000; ndev->mtu = ETH_DATA_LEN; ndev->watchdog_timeo = 5 * HZ; } int sipc4_init_io_device(struct io_device *iod) { int ret = 0; struct vnet *vnet; /* Get modem state from modem control device */ iod->modem_state_changed = io_dev_modem_state_changed; iod->sim_state_changed = io_dev_sim_state_changed; /* Get data from link device */ iod->recv = io_dev_recv_data_from_link_dev; /* Register misc or net device */ switch (iod->io_typ) { case IODEV_MISC: init_waitqueue_head(&iod->wq); skb_queue_head_init(&iod->sk_rx_q); INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work); iod->miscdev.minor = MISC_DYNAMIC_MINOR; iod->miscdev.name = iod->name; iod->miscdev.fops = &misc_io_fops; ret = misc_register(&iod->miscdev); if (ret) mif_err("failed to register misc io device : %s\n", iod->name); break; case IODEV_NET: skb_queue_head_init(&iod->sk_rx_q); if (iod->use_handover) iod->ndev = alloc_netdev(0, iod->name, vnet_setup_ether); else iod->ndev = alloc_netdev(0, iod->name, vnet_setup); if (!iod->ndev) { mif_err("failed to alloc 
netdev\n"); return -ENOMEM; } ret = register_netdev(iod->ndev); if (ret) free_netdev(iod->ndev); mif_debug("(iod:0x%p)\n", iod); vnet = netdev_priv(iod->ndev); mif_debug("(vnet:0x%p)\n", vnet); vnet->iod = iod; break; case IODEV_DUMMY: skb_queue_head_init(&iod->sk_rx_q); INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work); iod->miscdev.minor = MISC_DYNAMIC_MINOR; iod->miscdev.name = iod->name; iod->miscdev.fops = &misc_io_fops; ret = misc_register(&iod->miscdev); if (ret) mif_err("failed to register misc io device : %s\n", iod->name); ret = device_create_file(iod->miscdev.this_device, &attr_waketime); if (ret) mif_err("failed to create `waketime' file : %s\n", iod->name); ret = device_create_file(iod->miscdev.this_device, &attr_loopback); if (ret) mif_err("failed to create `loopback file' : %s\n", iod->name); break; default: mif_err("wrong io_type : %d\n", iod->io_typ); return -EINVAL; } mif_debug("%s(%d) : init_io_device() done : %d\n", iod->name, iod->io_typ, ret); return ret; }
gpl-2.0
henrix/beagle-linux
kernel/sys_ni.c
148
7137
#include <linux/linkage.h>
#include <linux/errno.h>

#include <asm/unistd.h>

/* we can't #include <linux/syscalls.h> here,
   but tell gcc to not warn with -Wmissing-prototypes */
asmlinkage long sys_ni_syscall(void);

/*
 * Non-implemented system calls get redirected here.
 */
/* Returns -ENOSYS unconditionally; cond_syscall() below weak-aliases every
 * optional syscall entry point to this stub, so a kernel configured without
 * the corresponding feature still links and the syscall fails cleanly. */
asmlinkage long sys_ni_syscall(void)
{
	return -ENOSYS;
}

/* quota */
cond_syscall(sys_quotactl);
cond_syscall(sys32_quotactl);
/* process accounting */
cond_syscall(sys_acct);
/* dcookies */
cond_syscall(sys_lookup_dcookie);
cond_syscall(compat_sys_lookup_dcookie);
/* swap */
cond_syscall(sys_swapon);
cond_syscall(sys_swapoff);
/* kexec */
cond_syscall(sys_kexec_load);
cond_syscall(compat_sys_kexec_load);
cond_syscall(sys_kexec_file_load);
/* loadable modules */
cond_syscall(sys_init_module);
cond_syscall(sys_finit_module);
cond_syscall(sys_delete_module);
/* networking */
cond_syscall(sys_socketpair);
cond_syscall(sys_bind);
cond_syscall(sys_listen);
cond_syscall(sys_accept);
cond_syscall(sys_accept4);
cond_syscall(sys_connect);
cond_syscall(sys_getsockname);
cond_syscall(sys_getpeername);
cond_syscall(sys_sendto);
cond_syscall(sys_send);
cond_syscall(sys_recvfrom);
cond_syscall(sys_recv);
cond_syscall(sys_socket);
cond_syscall(sys_setsockopt);
cond_syscall(compat_sys_setsockopt);
cond_syscall(sys_getsockopt);
cond_syscall(compat_sys_getsockopt);
cond_syscall(sys_shutdown);
cond_syscall(sys_sendmsg);
cond_syscall(sys_sendmmsg);
cond_syscall(compat_sys_sendmsg);
cond_syscall(compat_sys_sendmmsg);
cond_syscall(sys_recvmsg);
cond_syscall(sys_recvmmsg);
cond_syscall(compat_sys_recvmsg);
cond_syscall(compat_sys_recv);
cond_syscall(compat_sys_recvfrom);
cond_syscall(compat_sys_recvmmsg);
cond_syscall(sys_socketcall);
/* futexes */
cond_syscall(sys_futex);
cond_syscall(compat_sys_futex);
cond_syscall(sys_set_robust_list);
cond_syscall(compat_sys_set_robust_list);
cond_syscall(sys_get_robust_list);
cond_syscall(compat_sys_get_robust_list);
/* epoll */
cond_syscall(sys_epoll_create);
cond_syscall(sys_epoll_create1);
cond_syscall(sys_epoll_ctl);
cond_syscall(sys_epoll_wait);
cond_syscall(sys_epoll_pwait);
cond_syscall(compat_sys_epoll_pwait);
/* System V semaphores */
cond_syscall(sys_semget);
cond_syscall(sys_semop);
cond_syscall(sys_semtimedop);
cond_syscall(compat_sys_semtimedop);
cond_syscall(sys_semctl);
cond_syscall(compat_sys_semctl);
/* System V message queues */
cond_syscall(sys_msgget);
cond_syscall(sys_msgsnd);
cond_syscall(compat_sys_msgsnd);
cond_syscall(sys_msgrcv);
cond_syscall(compat_sys_msgrcv);
cond_syscall(sys_msgctl);
cond_syscall(compat_sys_msgctl);
/* System V shared memory */
cond_syscall(sys_shmget);
cond_syscall(sys_shmat);
cond_syscall(compat_sys_shmat);
cond_syscall(sys_shmdt);
cond_syscall(sys_shmctl);
cond_syscall(compat_sys_shmctl);
/* POSIX message queues */
cond_syscall(sys_mq_open);
cond_syscall(sys_mq_unlink);
cond_syscall(sys_mq_timedsend);
cond_syscall(sys_mq_timedreceive);
cond_syscall(sys_mq_notify);
cond_syscall(sys_mq_getsetattr);
cond_syscall(compat_sys_mq_open);
cond_syscall(compat_sys_mq_timedsend);
cond_syscall(compat_sys_mq_timedreceive);
cond_syscall(compat_sys_mq_notify);
cond_syscall(compat_sys_mq_getsetattr);
/* NUMA memory policy */
cond_syscall(sys_mbind);
cond_syscall(sys_get_mempolicy);
cond_syscall(sys_set_mempolicy);
cond_syscall(compat_sys_mbind);
cond_syscall(compat_sys_get_mempolicy);
cond_syscall(compat_sys_set_mempolicy);
/* key management */
cond_syscall(sys_add_key);
cond_syscall(sys_request_key);
cond_syscall(sys_keyctl);
cond_syscall(compat_sys_keyctl);
cond_syscall(compat_sys_socketcall);
/* inotify */
cond_syscall(sys_inotify_init);
cond_syscall(sys_inotify_init1);
cond_syscall(sys_inotify_add_watch);
cond_syscall(sys_inotify_rm_watch);
/* page migration */
cond_syscall(sys_migrate_pages);
cond_syscall(sys_move_pages);
/* legacy 16-bit uid/gid syscalls */
cond_syscall(sys_chown16);
cond_syscall(sys_fchown16);
cond_syscall(sys_getegid16);
cond_syscall(sys_geteuid16);
cond_syscall(sys_getgid16);
cond_syscall(sys_getgroups16);
cond_syscall(sys_getresgid16);
cond_syscall(sys_getresuid16);
cond_syscall(sys_getuid16);
cond_syscall(sys_lchown16);
cond_syscall(sys_setfsgid16);
cond_syscall(sys_setfsuid16);
cond_syscall(sys_setgid16);
cond_syscall(sys_setgroups16);
cond_syscall(sys_setregid16);
cond_syscall(sys_setresgid16);
cond_syscall(sys_setresuid16);
cond_syscall(sys_setreuid16);
cond_syscall(sys_setuid16);
cond_syscall(sys_sgetmask);
cond_syscall(sys_ssetmask);
/* x86-specific */
cond_syscall(sys_vm86old);
cond_syscall(sys_vm86);
cond_syscall(sys_modify_ldt);
/* multiplexed / deprecated entry points */
cond_syscall(sys_ipc);
cond_syscall(compat_sys_ipc);
cond_syscall(compat_sys_sysctl);
cond_syscall(sys_flock);
/* async I/O */
cond_syscall(sys_io_setup);
cond_syscall(sys_io_destroy);
cond_syscall(sys_io_submit);
cond_syscall(sys_io_cancel);
cond_syscall(sys_io_getevents);
cond_syscall(sys_sysfs);
cond_syscall(sys_syslog);
/* cross-process VM access */
cond_syscall(sys_process_vm_readv);
cond_syscall(sys_process_vm_writev);
cond_syscall(compat_sys_process_vm_readv);
cond_syscall(compat_sys_process_vm_writev);
cond_syscall(sys_uselib);
cond_syscall(sys_fadvise64);
cond_syscall(sys_fadvise64_64);
cond_syscall(sys_madvise);
/* credentials */
cond_syscall(sys_setuid);
cond_syscall(sys_setregid);
cond_syscall(sys_setgid);
cond_syscall(sys_setreuid);
cond_syscall(sys_setresuid);
cond_syscall(sys_getresuid);
cond_syscall(sys_setresgid);
cond_syscall(sys_getresgid);
cond_syscall(sys_setgroups);
cond_syscall(sys_getgroups);
cond_syscall(sys_setfsuid);
cond_syscall(sys_setfsgid);
cond_syscall(sys_capget);
cond_syscall(sys_capset);

/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
cond_syscall(sys_pciconfig_write);
cond_syscall(sys_pciconfig_iobase);
cond_syscall(compat_sys_s390_ipc);
cond_syscall(ppc_rtas);
cond_syscall(sys_spu_run);
cond_syscall(sys_spu_create);
cond_syscall(sys_subpage_prot);
cond_syscall(sys_s390_pci_mmio_read);
cond_syscall(sys_s390_pci_mmio_write);

/* mmu depending weak syscall entries */
cond_syscall(sys_mprotect);
cond_syscall(sys_msync);
cond_syscall(sys_mlock);
cond_syscall(sys_munlock);
cond_syscall(sys_mlockall);
cond_syscall(sys_munlockall);
cond_syscall(sys_mlock2);
cond_syscall(sys_mincore);
cond_syscall(sys_madvise);
cond_syscall(sys_mremap);
cond_syscall(sys_remap_file_pages);
cond_syscall(compat_sys_move_pages);
cond_syscall(compat_sys_migrate_pages);

/* block-layer dependent */
cond_syscall(sys_bdflush);
cond_syscall(sys_ioprio_set);
cond_syscall(sys_ioprio_get);

/* New file descriptors */
cond_syscall(sys_signalfd);
cond_syscall(sys_signalfd4);
cond_syscall(compat_sys_signalfd);
cond_syscall(compat_sys_signalfd4);
cond_syscall(sys_timerfd_create);
cond_syscall(sys_timerfd_settime);
cond_syscall(sys_timerfd_gettime);
cond_syscall(compat_sys_timerfd_settime);
cond_syscall(compat_sys_timerfd_gettime);
cond_syscall(sys_eventfd);
cond_syscall(sys_eventfd2);
cond_syscall(sys_memfd_create);
cond_syscall(sys_userfaultfd);

/* performance counters: */
cond_syscall(sys_perf_event_open);

/* fanotify! */
cond_syscall(sys_fanotify_init);
cond_syscall(sys_fanotify_mark);
cond_syscall(compat_sys_fanotify_mark);

/* open by handle */
cond_syscall(sys_name_to_handle_at);
cond_syscall(sys_open_by_handle_at);
cond_syscall(compat_sys_open_by_handle_at);

/* compare kernel pointers */
cond_syscall(sys_kcmp);

/* operate on Secure Computing state */
cond_syscall(sys_seccomp);

/* access BPF programs and maps */
cond_syscall(sys_bpf);

/* execveat */
cond_syscall(sys_execveat);

/* membarrier */
cond_syscall(sys_membarrier);
gpl-2.0
nathanlnw/TJ_TW703_cdma
components/net/lwip-1.3.2/src/netif/ppp/auth.c
148
25942
/***************************************************************************** * auth.c - Network Authentication and Phase Control program file. * * Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. * Copyright (c) 1997 by Global Election Systems Inc. All rights reserved. * * The authors hereby grant permission to use, copy, modify, distribute, * and license this software and its documentation for any purpose, provided * that existing copyright notices are retained in all copies and that this * notice and the following disclaimer are included verbatim in any * distributions. No written agreement, license, or royalty fee is required * for any of the authorized uses. * * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** * REVISION HISTORY * * 03-01-01 Marc Boucher <marc@mbsi.ca> * Ported to lwIP. * 97-12-08 Guy Lancaster <lancasterg@acm.org>, Global Election Systems Inc. * Ported from public pppd code. *****************************************************************************/ /* * auth.c - PPP authentication and phase control. * * Copyright (c) 1993 The Australian National University. * All rights reserved. 
* * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by the Australian National University. The name of the University * may not be used to endorse or promote products derived from this * software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Copyright (c) 1989 Carnegie Mellon University. * All rights reserved. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by Carnegie Mellon University. The name of the * University may not be used to endorse or promote products derived * from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ #include "lwip/opt.h" #if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ #include "ppp.h" #include "pppdebug.h" #include "fsm.h" #include "lcp.h" #include "pap.h" #include "chap.h" #include "auth.h" #include "ipcp.h" #if CBCP_SUPPORT #include "cbcp.h" #endif /* CBCP_SUPPORT */ #include <string.h> /*************************/ /*** LOCAL DEFINITIONS ***/ /*************************/ /* Bits in auth_pending[] */ #define PAP_WITHPEER 1 #define PAP_PEER 2 #define CHAP_WITHPEER 4 #define CHAP_PEER 8 /************************/ /*** LOCAL DATA TYPES ***/ /************************/ /* Used for storing a sequence of words. Usually malloced. */ struct wordlist { struct wordlist *next; char word[1]; }; /***********************************/ /*** LOCAL FUNCTION DECLARATIONS ***/ /***********************************/ extern char *crypt (const char *, const char *); /* Prototypes for procedures local to this file. */ static void network_phase (int); static void check_idle (void *); static void connect_time_expired (void *); #if 0 static int login (char *, char *, char **, int *); #endif static void logout (void); static int null_login (int); static int get_pap_passwd (int, char *, char *); static int have_pap_secret (void); static int have_chap_secret (char *, char *, u32_t); static int ip_addr_check (u32_t, struct wordlist *); #if 0 /* PAP_SUPPORT || CHAP_SUPPORT */ static void set_allowed_addrs(int unit, struct wordlist *addrs); static void free_wordlist (struct wordlist *); #endif /* 0 */ /* PAP_SUPPORT || CHAP_SUPPORT */ #if CBCP_SUPPORT static void callback_phase (int); #endif /* CBCP_SUPPORT */ /******************************/ /*** PUBLIC DATA STRUCTURES ***/ /******************************/ /*****************************/ /*** LOCAL DATA STRUCTURES ***/ /*****************************/ #if PAP_SUPPORT || CHAP_SUPPORT /* The name by which the peer authenticated itself to us. 
*/ static char peer_authname[MAXNAMELEN]; #endif /* PAP_SUPPORT || CHAP_SUPPORT */ /* Records which authentication operations haven't completed yet. */ static int auth_pending[NUM_PPP]; /* Set if we have successfully called login() */ static int logged_in; /* Set if we have run the /etc/ppp/auth-up script. */ static int did_authup; /* List of addresses which the peer may use. */ static struct wordlist *addresses[NUM_PPP]; /* Number of network protocols which we have opened. */ static int num_np_open; /* Number of network protocols which have come up. */ static int num_np_up; #if PAP_SUPPORT || CHAP_SUPPORT /* Set if we got the contents of passwd[] from the pap-secrets file. */ static int passwd_from_file; #endif /* PAP_SUPPORT || CHAP_SUPPORT */ /***********************************/ /*** PUBLIC FUNCTION DEFINITIONS ***/ /***********************************/ /* * An Open on LCP has requested a change from Dead to Establish phase. * Do what's necessary to bring the physical layer up. */ void link_required(int unit) { LWIP_UNUSED_ARG(unit); AUTHDEBUG((LOG_INFO, "link_required: %d\n", unit)); } /* * LCP has terminated the link; go to the Dead phase and take the * physical layer down. */ void link_terminated(int unit) { AUTHDEBUG((LOG_INFO, "link_terminated: %d\n", unit)); if (lcp_phase[unit] == PHASE_DEAD) { return; } if (logged_in) { logout(); } lcp_phase[unit] = PHASE_DEAD; AUTHDEBUG((LOG_NOTICE, "Connection terminated.\n")); pppLinkTerminated(unit); } /* * LCP has gone down; it will either die or try to re-establish. */ void link_down(int unit) { int i; struct protent *protp; AUTHDEBUG((LOG_INFO, "link_down: %d\n", unit)); if (did_authup) { /* XXX Do link down processing. 
*/ did_authup = 0; } for (i = 0; (protp = ppp_protocols[i]) != NULL; ++i) { if (!protp->enabled_flag) { continue; } if (protp->protocol != PPP_LCP && protp->lowerdown != NULL) { (*protp->lowerdown)(unit); } if (protp->protocol < 0xC000 && protp->close != NULL) { (*protp->close)(unit, "LCP down"); } } num_np_open = 0; num_np_up = 0; if (lcp_phase[unit] != PHASE_DEAD) { lcp_phase[unit] = PHASE_TERMINATE; } pppLinkDown(unit); } /* * The link is established. * Proceed to the Dead, Authenticate or Network phase as appropriate. */ void link_established(int unit) { int auth; int i; struct protent *protp; lcp_options *wo = &lcp_wantoptions[unit]; lcp_options *go = &lcp_gotoptions[unit]; #if PAP_SUPPORT || CHAP_SUPPORT lcp_options *ho = &lcp_hisoptions[unit]; #endif /* PAP_SUPPORT || CHAP_SUPPORT */ AUTHDEBUG((LOG_INFO, "link_established: %d\n", unit)); /* * Tell higher-level protocols that LCP is up. */ for (i = 0; (protp = ppp_protocols[i]) != NULL; ++i) { if (protp->protocol != PPP_LCP && protp->enabled_flag && protp->lowerup != NULL) { (*protp->lowerup)(unit); } } if (ppp_settings.auth_required && !(go->neg_chap || go->neg_upap)) { /* * We wanted the peer to authenticate itself, and it refused: * treat it as though it authenticated with PAP using a username * of "" and a password of "". If that's not OK, boot it out. 
*/ if (!wo->neg_upap || !null_login(unit)) { AUTHDEBUG((LOG_WARNING, "peer refused to authenticate\n")); lcp_close(unit, "peer refused to authenticate"); return; } } lcp_phase[unit] = PHASE_AUTHENTICATE; auth = 0; #if CHAP_SUPPORT if (go->neg_chap) { ChapAuthPeer(unit, ppp_settings.our_name, go->chap_mdtype); auth |= CHAP_PEER; } #endif /* CHAP_SUPPORT */ #if PAP_SUPPORT && CHAP_SUPPORT else #endif /* PAP_SUPPORT && CHAP_SUPPORT */ #if PAP_SUPPORT if (go->neg_upap) { upap_authpeer(unit); auth |= PAP_PEER; } #endif /* PAP_SUPPORT */ #if CHAP_SUPPORT if (ho->neg_chap) { ChapAuthWithPeer(unit, ppp_settings.user, ho->chap_mdtype); auth |= CHAP_WITHPEER; } #endif /* CHAP_SUPPORT */ #if PAP_SUPPORT && CHAP_SUPPORT else #endif /* PAP_SUPPORT && CHAP_SUPPORT */ #if PAP_SUPPORT if (ho->neg_upap) { if (ppp_settings.passwd[0] == 0) { passwd_from_file = 1; if (!get_pap_passwd(unit, ppp_settings.user, ppp_settings.passwd)) { AUTHDEBUG((LOG_ERR, "No secret found for PAP login\n")); } } upap_authwithpeer(unit, ppp_settings.user, ppp_settings.passwd); auth |= PAP_WITHPEER; } #endif /* PAP_SUPPORT */ auth_pending[unit] = auth; if (!auth) { network_phase(unit); } } /* * The peer has failed to authenticate himself using `protocol'. */ void auth_peer_fail(int unit, u16_t protocol) { LWIP_UNUSED_ARG(protocol); AUTHDEBUG((LOG_INFO, "auth_peer_fail: %d proto=%X\n", unit, protocol)); /* * Authentication failure: take the link down */ lcp_close(unit, "Authentication failed"); } #if PAP_SUPPORT || CHAP_SUPPORT /* * The peer has been successfully authenticated using `protocol'. */ void auth_peer_success(int unit, u16_t protocol, char *name, int namelen) { int pbit; AUTHDEBUG((LOG_INFO, "auth_peer_success: %d proto=%X\n", unit, protocol)); switch (protocol) { case PPP_CHAP: pbit = CHAP_PEER; break; case PPP_PAP: pbit = PAP_PEER; break; default: AUTHDEBUG((LOG_WARNING, "auth_peer_success: unknown protocol %x\n", protocol)); return; } /* * Save the authenticated name of the peer for later. 
*/ if (namelen > sizeof(peer_authname) - 1) { namelen = sizeof(peer_authname) - 1; } BCOPY(name, peer_authname, namelen); peer_authname[namelen] = 0; /* * If there is no more authentication still to be done, * proceed to the network (or callback) phase. */ if ((auth_pending[unit] &= ~pbit) == 0) { network_phase(unit); } } /* * We have failed to authenticate ourselves to the peer using `protocol'. */ void auth_withpeer_fail(int unit, u16_t protocol) { int errCode = PPPERR_AUTHFAIL; LWIP_UNUSED_ARG(protocol); AUTHDEBUG((LOG_INFO, "auth_withpeer_fail: %d proto=%X\n", unit, protocol)); if (passwd_from_file) { BZERO(ppp_settings.passwd, MAXSECRETLEN); } /* * XXX Warning: the unit number indicates the interface which is * not necessarily the PPP connection. It works here as long * as we are only supporting PPP interfaces. */ pppIOCtl(unit, PPPCTLS_ERRCODE, &errCode); /* * We've failed to authenticate ourselves to our peer. * He'll probably take the link down, and there's not much * we can do except wait for that. */ } /* * We have successfully authenticated ourselves with the peer using `protocol'. */ void auth_withpeer_success(int unit, u16_t protocol) { int pbit; AUTHDEBUG((LOG_INFO, "auth_withpeer_success: %d proto=%X\n", unit, protocol)); switch (protocol) { case PPP_CHAP: pbit = CHAP_WITHPEER; break; case PPP_PAP: if (passwd_from_file) { BZERO(ppp_settings.passwd, MAXSECRETLEN); } pbit = PAP_WITHPEER; break; default: AUTHDEBUG((LOG_WARNING, "auth_peer_success: unknown protocol %x\n", protocol)); pbit = 0; } /* * If there is no more authentication still being done, * proceed to the network (or callback) phase. */ if ((auth_pending[unit] &= ~pbit) == 0) { network_phase(unit); } } #endif /* PAP_SUPPORT || CHAP_SUPPORT */ /* * np_up - a network protocol has come up. 
*/ void np_up(int unit, u16_t proto) { LWIP_UNUSED_ARG(unit); LWIP_UNUSED_ARG(proto); AUTHDEBUG((LOG_INFO, "np_up: %d proto=%X\n", unit, proto)); if (num_np_up == 0) { AUTHDEBUG((LOG_INFO, "np_up: maxconnect=%d idle_time_limit=%d\n",ppp_settings.maxconnect,ppp_settings.idle_time_limit)); /* * At this point we consider that the link has come up successfully. */ if (ppp_settings.idle_time_limit > 0) { TIMEOUT(check_idle, NULL, ppp_settings.idle_time_limit); } /* * Set a timeout to close the connection once the maximum * connect time has expired. */ if (ppp_settings.maxconnect > 0) { TIMEOUT(connect_time_expired, 0, ppp_settings.maxconnect); } } ++num_np_up; } /* * np_down - a network protocol has gone down. */ void np_down(int unit, u16_t proto) { LWIP_UNUSED_ARG(unit); LWIP_UNUSED_ARG(proto); AUTHDEBUG((LOG_INFO, "np_down: %d proto=%X\n", unit, proto)); if (--num_np_up == 0 && ppp_settings.idle_time_limit > 0) { UNTIMEOUT(check_idle, NULL); } } /* * np_finished - a network protocol has finished using the link. */ void np_finished(int unit, u16_t proto) { LWIP_UNUSED_ARG(unit); LWIP_UNUSED_ARG(proto); AUTHDEBUG((LOG_INFO, "np_finished: %d proto=%X\n", unit, proto)); if (--num_np_open <= 0) { /* no further use for the link: shut up shop. */ lcp_close(0, "No network protocols running"); } } /* * auth_reset - called when LCP is starting negotiations to recheck * authentication options, i.e. whether we have appropriate secrets * to use for authenticating ourselves and/or the peer. 
*/ void auth_reset(int unit) { lcp_options *go = &lcp_gotoptions[unit]; lcp_options *ao = &lcp_allowoptions[0]; ipcp_options *ipwo = &ipcp_wantoptions[0]; u32_t remote; AUTHDEBUG((LOG_INFO, "auth_reset: %d\n", unit)); ao->neg_upap = !ppp_settings.refuse_pap && (ppp_settings.passwd[0] != 0 || get_pap_passwd(unit, NULL, NULL)); ao->neg_chap = !ppp_settings.refuse_chap && ppp_settings.passwd[0] != 0 /*have_chap_secret(ppp_settings.user, ppp_settings.remote_name, (u32_t)0)*/; if (go->neg_upap && !have_pap_secret()) { go->neg_upap = 0; } if (go->neg_chap) { remote = ipwo->accept_remote? 0: ipwo->hisaddr; if (!have_chap_secret(ppp_settings.remote_name, ppp_settings.our_name, remote)) { go->neg_chap = 0; } } } #if PAP_SUPPORT /* * check_passwd - Check the user name and passwd against the PAP secrets * file. If requested, also check against the system password database, * and login the user if OK. * * returns: * UPAP_AUTHNAK: Authentication failed. * UPAP_AUTHACK: Authentication succeeded. * In either case, msg points to an appropriate message. */ int check_passwd( int unit, char *auser, int userlen, char *apasswd, int passwdlen, char **msg, int *msglen) { #if 1 LWIP_UNUSED_ARG(unit); LWIP_UNUSED_ARG(auser); LWIP_UNUSED_ARG(userlen); LWIP_UNUSED_ARG(apasswd); LWIP_UNUSED_ARG(passwdlen); LWIP_UNUSED_ARG(msglen); *msg = (char *) 0; return UPAP_AUTHACK; /* XXX Assume all entries OK. */ #else int ret = 0; struct wordlist *addrs = NULL; char passwd[256], user[256]; char secret[MAXWORDLEN]; static u_short attempts = 0; /* * Make copies of apasswd and auser, then null-terminate them. */ BCOPY(apasswd, passwd, passwdlen); passwd[passwdlen] = '\0'; BCOPY(auser, user, userlen); user[userlen] = '\0'; *msg = (char *) 0; /* XXX Validate user name and password. */ ret = UPAP_AUTHACK; /* XXX Assume all entries OK. */ if (ret == UPAP_AUTHNAK) { if (*msg == (char *) 0) { *msg = "Login incorrect"; } *msglen = strlen(*msg); /* * Frustrate passwd stealer programs. 
* Allow 10 tries, but start backing off after 3 (stolen from login). * On 10'th, drop the connection. */ if (attempts++ >= 10) { AUTHDEBUG((LOG_WARNING, "%d LOGIN FAILURES BY %s\n", attempts, user)); /*ppp_panic("Excess Bad Logins");*/ } if (attempts > 3) { sys_msleep((attempts - 3) * 5); } if (addrs != NULL) { free_wordlist(addrs); } } else { attempts = 0; /* Reset count */ if (*msg == (char *) 0) { *msg = "Login ok"; } *msglen = strlen(*msg); set_allowed_addrs(unit, addrs); } BZERO(passwd, sizeof(passwd)); BZERO(secret, sizeof(secret)); return ret; #endif } #endif /* PAP_SUPPORT */ /* * auth_ip_addr - check whether the peer is authorized to use * a given IP address. Returns 1 if authorized, 0 otherwise. */ int auth_ip_addr(int unit, u32_t addr) { return ip_addr_check(addr, addresses[unit]); } /* * bad_ip_adrs - return 1 if the IP address is one we don't want * to use, such as an address in the loopback net or a multicast address. * addr is in network byte order. */ int bad_ip_adrs(u32_t addr) { addr = ntohl(addr); return (addr >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || IN_MULTICAST(addr) || IN_BADCLASS(addr); } #if CHAP_SUPPORT /* * get_secret - open the CHAP secret file and return the secret * for authenticating the given client on the given server. * (We could be either client or server). */ int get_secret( int unit, char *client, char *server, char *secret, int *secret_len, int save_addrs) { #if 1 int len; struct wordlist *addrs; LWIP_UNUSED_ARG(unit); LWIP_UNUSED_ARG(server); LWIP_UNUSED_ARG(save_addrs); addrs = NULL; if(!client || !client[0] || strcmp(client, ppp_settings.user)) { return 0; } len = strlen(ppp_settings.passwd); if (len > MAXSECRETLEN) { AUTHDEBUG((LOG_ERR, "Secret for %s on %s is too long\n", client, server)); len = MAXSECRETLEN; } BCOPY(ppp_settings.passwd, secret, len); *secret_len = len; return 1; #else int ret = 0, len; struct wordlist *addrs; char secbuf[MAXWORDLEN]; addrs = NULL; secbuf[0] = 0; /* XXX Find secret. 
*/ if (ret < 0) { return 0; } if (save_addrs) { set_allowed_addrs(unit, addrs); } len = strlen(secbuf); if (len > MAXSECRETLEN) { AUTHDEBUG((LOG_ERR, "Secret for %s on %s is too long\n", client, server)); len = MAXSECRETLEN; } BCOPY(secbuf, secret, len); BZERO(secbuf, sizeof(secbuf)); *secret_len = len; return 1; #endif } #endif /* CHAP_SUPPORT */ #if 0 /* UNUSED */ /* * auth_check_options - called to check authentication options. */ void auth_check_options(void) { lcp_options *wo = &lcp_wantoptions[0]; int can_auth; ipcp_options *ipwo = &ipcp_wantoptions[0]; u32_t remote; /* Default our_name to hostname, and user to our_name */ if (ppp_settings.our_name[0] == 0 || ppp_settings.usehostname) { strcpy(ppp_settings.our_name, ppp_settings.hostname); } if (ppp_settings.user[0] == 0) { strcpy(ppp_settings.user, ppp_settings.our_name); } /* If authentication is required, ask peer for CHAP or PAP. */ if (ppp_settings.auth_required && !wo->neg_chap && !wo->neg_upap) { wo->neg_chap = 1; wo->neg_upap = 1; } /* * Check whether we have appropriate secrets to use * to authenticate the peer. */ can_auth = wo->neg_upap && have_pap_secret(); if (!can_auth && wo->neg_chap) { remote = ipwo->accept_remote? 0: ipwo->hisaddr; can_auth = have_chap_secret(ppp_settings.remote_name, ppp_settings.our_name, remote); } if (ppp_settings.auth_required && !can_auth) { ppp_panic("No auth secret"); } } #endif /**********************************/ /*** LOCAL FUNCTION DEFINITIONS ***/ /**********************************/ /* * Proceed to the network phase. */ static void network_phase(int unit) { int i; struct protent *protp; lcp_options *go = &lcp_gotoptions[unit]; /* * If the peer had to authenticate, run the auth-up script now. */ if ((go->neg_chap || go->neg_upap) && !did_authup) { /* XXX Do setup for peer authentication. */ did_authup = 1; } #if CBCP_SUPPORT /* * If we negotiated callback, do it now. 
*/ if (go->neg_cbcp) { lcp_phase[unit] = PHASE_CALLBACK; (*cbcp_protent.open)(unit); return; } #endif /* CBCP_SUPPORT */ lcp_phase[unit] = PHASE_NETWORK; for (i = 0; (protp = ppp_protocols[i]) != NULL; ++i) { if (protp->protocol < 0xC000 && protp->enabled_flag && protp->open != NULL) { (*protp->open)(unit); if (protp->protocol != PPP_CCP) { ++num_np_open; } } } if (num_np_open == 0) { /* nothing to do */ lcp_close(0, "No network protocols running"); } } /* * check_idle - check whether the link has been idle for long * enough that we can shut it down. */ static void check_idle(void *arg) { struct ppp_idle idle; u_short itime; LWIP_UNUSED_ARG(arg); if (!get_idle_time(0, &idle)) { return; } itime = LWIP_MIN(idle.xmit_idle, idle.recv_idle); if (itime >= ppp_settings.idle_time_limit) { /* link is idle: shut it down. */ AUTHDEBUG((LOG_INFO, "Terminating connection due to lack of activity.\n")); lcp_close(0, "Link inactive"); } else { TIMEOUT(check_idle, NULL, ppp_settings.idle_time_limit - itime); } } /* * connect_time_expired - log a message and close the connection. */ static void connect_time_expired(void *arg) { LWIP_UNUSED_ARG(arg); AUTHDEBUG((LOG_INFO, "Connect time expired\n")); lcp_close(0, "Connect time expired"); /* Close connection */ } #if 0 /* * login - Check the user name and password against the system * password database, and login the user if OK. * * returns: * UPAP_AUTHNAK: Login failed. * UPAP_AUTHACK: Login succeeded. * In either case, msg points to an appropriate message. */ static int login(char *user, char *passwd, char **msg, int *msglen) { /* XXX Fail until we decide that we want to support logins. */ return (UPAP_AUTHNAK); } #endif /* * logout - Logout the user. */ static void logout(void) { logged_in = 0; } /* * null_login - Check if a username of "" and a password of "" are * acceptable, and iff so, set the list of acceptable IP addresses * and return 1. 
*/ static int null_login(int unit) { LWIP_UNUSED_ARG(unit); /* XXX Fail until we decide that we want to support logins. */ return 0; } /* * get_pap_passwd - get a password for authenticating ourselves with * our peer using PAP. Returns 1 on success, 0 if no suitable password * could be found. */ static int get_pap_passwd(int unit, char *user, char *passwd) { LWIP_UNUSED_ARG(unit); /* normally we would reject PAP if no password is provided, but this causes problems with some providers (like CHT in Taiwan) who incorrectly request PAP and expect a bogus/empty password, so always provide a default user/passwd of "none"/"none" */ if(user) { strcpy(user, "none"); } if(passwd) { strcpy(passwd, "none"); } return 1; } /* * have_pap_secret - check whether we have a PAP file with any * secrets that we could possibly use for authenticating the peer. */ static int have_pap_secret(void) { /* XXX Fail until we set up our passwords. */ return 0; } /* * have_chap_secret - check whether we have a CHAP file with a * secret that we could possibly use for authenticating `client' * on `server'. Either can be the null string, meaning we don't * know the identity yet. */ static int have_chap_secret(char *client, char *server, u32_t remote) { LWIP_UNUSED_ARG(client); LWIP_UNUSED_ARG(server); LWIP_UNUSED_ARG(remote); /* XXX Fail until we set up our passwords. */ return 0; } #if 0 /* PAP_SUPPORT || CHAP_SUPPORT */ /* * set_allowed_addrs() - set the list of allowed addresses. */ static void set_allowed_addrs(int unit, struct wordlist *addrs) { if (addresses[unit] != NULL) { free_wordlist(addresses[unit]); } addresses[unit] = addrs; #if 0 /* * If there's only one authorized address we might as well * ask our peer for that one right away */ if (addrs != NULL && addrs->next == NULL) { char *p = addrs->word; struct ipcp_options *wo = &ipcp_wantoptions[unit]; u32_t a; struct hostent *hp; if (wo->hisaddr == 0 && *p != '!' 
&& *p != '-' && strchr(p, '/') == NULL) { hp = gethostbyname(p); if (hp != NULL && hp->h_addrtype == AF_INET) { a = *(u32_t *)hp->h_addr; } else { a = inet_addr(p); } if (a != (u32_t) -1) { wo->hisaddr = a; } } } #endif } #endif /* 0 */ /* PAP_SUPPORT || CHAP_SUPPORT */ static int ip_addr_check(u32_t addr, struct wordlist *addrs) { /* don't allow loopback or multicast address */ if (bad_ip_adrs(addr)) { return 0; } if (addrs == NULL) { return !ppp_settings.auth_required; /* no addresses authorized */ } /* XXX All other addresses allowed. */ return 1; } #if 0 /* PAP_SUPPORT || CHAP_SUPPORT */ /* * free_wordlist - release memory allocated for a wordlist. */ static void free_wordlist(struct wordlist *wp) { struct wordlist *next; while (wp != NULL) { next = wp->next; free(wp); wp = next; } } #endif /* 0 */ /* PAP_SUPPORT || CHAP_SUPPORT */ #endif /* PPP_SUPPORT */
gpl-2.0
psomas/lguest64
arch/sparc/mm/srmmu.c
148
70401
/* * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/bootmem.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kdebug.h> #include <linux/log2.h> #include <linux/gfp.h> #include <asm/bitext.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/vaddrs.h> #include <asm/traps.h> #include <asm/smp.h> #include <asm/mbus.h> #include <asm/cache.h> #include <asm/oplib.h> #include <asm/asi.h> #include <asm/msi.h> #include <asm/mmu_context.h> #include <asm/io-unit.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /* Now the cpu specific definitions. 
*/ #include <asm/viking.h> #include <asm/mxcc.h> #include <asm/ross.h> #include <asm/tsunami.h> #include <asm/swift.h> #include <asm/turbosparc.h> #include <asm/leon.h> #include <asm/btfixup.h> enum mbus_module srmmu_modtype; static unsigned int hwbug_bitmask; int vac_cache_size; int vac_line_size; extern struct resource sparc_iomap; extern unsigned long last_valid_pfn; extern unsigned long page_kernel; static pgd_t *srmmu_swapper_pg_dir; #ifdef CONFIG_SMP #define FLUSH_BEGIN(mm) #define FLUSH_END #else #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { #define FLUSH_END } #endif BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) int flush_page_for_dma_global = 1; #ifdef CONFIG_SMP BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long) #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page) #endif char *srmmu_name; ctxd_t *srmmu_ctx_table_phys; static ctxd_t *srmmu_context_table; int viking_mxcc_present; static DEFINE_SPINLOCK(srmmu_context_spinlock); static int is_hypersparc; /* * In general all page table modifications should use the V8 atomic * swap instruction. This insures the mmu and the cpu are in sync * with respect to ref/mod bits in the page tables. */ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) { __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr)); return value; } static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval) { srmmu_swap((unsigned long *)ptep, pte_val(pteval)); } /* The very generic SRMMU page table operations. 
*/ static inline int srmmu_device_memory(unsigned long x) { return ((x & 0xF0000000) != 0); } static int srmmu_cache_pagetables; /* these will be initialized in srmmu_nocache_calcsize() */ static unsigned long srmmu_nocache_size; static unsigned long srmmu_nocache_end; /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) /* The context table is a nocache user with the biggest alignment needs. */ #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) void *srmmu_nocache_pool; void *srmmu_nocache_bitmap; static struct bit_map srmmu_nocache_map; static unsigned long srmmu_pte_pfn(pte_t pte) { if (srmmu_device_memory(pte_val(pte))) { /* Just return something that will cause * pfn_valid() to return false. This makes * copy_one_pte() to just directly copy to * PTE over. */ return ~0UL; } return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4); } static struct page *srmmu_pmd_page(pmd_t pmd) { if (srmmu_device_memory(pmd_val(pmd))) BUG(); return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); } static inline unsigned long srmmu_pgd_page(pgd_t pgd) { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } static inline int srmmu_pte_none(pte_t pte) { return !(pte_val(pte) & 0xFFFFFFF); } static inline int srmmu_pte_present(pte_t pte) { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } static inline void srmmu_pte_clear(pte_t *ptep) { srmmu_set_pte(ptep, __pte(0)); } static inline int srmmu_pmd_none(pmd_t pmd) { return !(pmd_val(pmd) & 0xFFFFFFF); } static inline int srmmu_pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } static inline int srmmu_pmd_present(pmd_t pmd) { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } static inline void srmmu_pmd_clear(pmd_t *pmdp) { int i; for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); } static 
inline int srmmu_pgd_none(pgd_t pgd) { return !(pgd_val(pgd) & 0xFFFFFFF); } static inline int srmmu_pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } static inline int srmmu_pgd_present(pgd_t pgd) { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } static inline void srmmu_pgd_clear(pgd_t * pgdp) { srmmu_set_pte((pte_t *)pgdp, __pte(0)); } static inline pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);} static inline pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);} static inline pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);} static inline pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);} static inline pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);} static inline pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);} /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) { return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); } static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) { return __pte(((page) >> 4) | pgprot_val(pgprot)); } static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) { return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); } /* XXX should we hyper_flush_whole_icache here - Anton */ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) { srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ int i; ptp = __nocache_pa((unsigned long) ptep) >> 4; for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); } } static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ int i; ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); } } static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } /* to find an entry in a top-level page table... */ static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } /* Find an entry in the second-level page table.. 
*/ static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) { return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); } /* Find an entry in the third-level page table.. */ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) { void *pte; pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); return (pte_t *) pte + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); } static unsigned long srmmu_swp_type(swp_entry_t entry) { return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK; } static unsigned long srmmu_swp_offset(swp_entry_t entry) { return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK; } static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset) { return (swp_entry_t) { (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT }; } /* * size: bytes to allocate in the nocache area. * align: bytes, number to align at. * Returns the virtual address of the allocated area. 
*/ static unsigned long __srmmu_get_nocache(int size, int align) { int offset; if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x too small for nocache request\n", size); size = SRMMU_NOCACHE_BITMAP_SHIFT; } if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { printk("Size 0x%x unaligned int nocache request\n", size); size += SRMMU_NOCACHE_BITMAP_SHIFT-1; } BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); offset = bit_map_string_get(&srmmu_nocache_map, size >> SRMMU_NOCACHE_BITMAP_SHIFT, align >> SRMMU_NOCACHE_BITMAP_SHIFT); if (offset == -1) { printk("srmmu: out of nocache %d: %d/%d\n", size, (int) srmmu_nocache_size, srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); return 0; } return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); } static unsigned long srmmu_get_nocache(int size, int align) { unsigned long tmp; tmp = __srmmu_get_nocache(size, align); if (tmp) memset((void *)tmp, 0, size); return tmp; } static void srmmu_free_nocache(unsigned long vaddr, int size) { int offset; if (vaddr < SRMMU_NOCACHE_VADDR) { printk("Vaddr %lx is smaller than nocache base 0x%lx\n", vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); BUG(); } if (vaddr+size > srmmu_nocache_end) { printk("Vaddr %lx is bigger than nocache end 0x%lx\n", vaddr, srmmu_nocache_end); BUG(); } if (!is_power_of_2(size)) { printk("Size 0x%x is not a power of 2\n", size); BUG(); } if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x is too small\n", size); BUG(); } if (vaddr & (size-1)) { printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); BUG(); } offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; bit_map_clear(&srmmu_nocache_map, offset, size); } static void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end); extern unsigned long probe_memory(void); /* in fault.c */ /* * Reserve nocache dynamically proportionally to the amount of * system RAM. 
-- Tomas Szepe <szepe@pinerecords.com>, June 2002 */ static void srmmu_nocache_calcsize(void) { unsigned long sysmemavail = probe_memory() / 1024; int srmmu_nocache_npages; srmmu_nocache_npages = sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */ // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; /* anything above 1280 blows up */ if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; } static void __init srmmu_nocache_init(void) { unsigned int bitmap_bits; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned long paddr, vaddr; unsigned long pteval; bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); init_mm.pgd = srmmu_swapper_pg_dir; srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); paddr = __pa((unsigned long)srmmu_nocache_pool); vaddr = SRMMU_NOCACHE_VADDR; while (vaddr < srmmu_nocache_end) { pgd = pgd_offset_k(vaddr); pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); if (srmmu_cache_pagetables) pteval |= SRMMU_CACHE; srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); vaddr += PAGE_SIZE; paddr += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); } static 
inline pgd_t *srmmu_get_pgd_fast(void) { pgd_t *pgd = NULL; pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); if (pgd) { pgd_t *init = pgd_offset_k(0); memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } return pgd; } static void srmmu_free_pgd_fast(pgd_t *pgd) { srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); } static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address) { return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); } static void srmmu_pmd_free(pmd_t * pmd) { srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); } /* * Hardware needs alignment to 256 only, but we align to whole page size * to reduce fragmentation problems due to the buddy principle. * XXX Provide actual fragmentation statistics in /proc. * * Alignments up to the page size are the same for physical and virtual * addresses of the nocache area. 
*/ static pte_t * srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); } static pgtable_t srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) { unsigned long pte; struct page *page; if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) return NULL; page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); pgtable_page_ctor(page); return page; } static void srmmu_free_pte_fast(pte_t *pte) { srmmu_free_nocache((unsigned long)pte, PTE_SIZE); } static void srmmu_pte_free(pgtable_t pte) { unsigned long p; pgtable_page_dtor(pte); p = (unsigned long)page_address(pte); /* Cached address (for test) */ if (p == 0) BUG(); p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ p = (unsigned long) __nocache_va(p); /* Nocached virtual */ srmmu_free_nocache(p, PTE_SIZE); } /* */ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; ctxp = ctx_free.next; if(ctxp != &ctx_free) { remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); mm->context = ctxp->ctx_number; ctxp->ctx_mm = mm; return; } ctxp = ctx_used.next; if(ctxp->ctx_mm == old_mm) ctxp = ctxp->next; if(ctxp == &ctx_used) panic("out of mmu contexts"); flush_cache_mm(ctxp->ctx_mm); flush_tlb_mm(ctxp->ctx_mm); remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); ctxp->ctx_mm->context = NO_CONTEXT; ctxp->ctx_mm = mm; mm->context = ctxp->ctx_number; } static inline void free_context(int context) { struct ctx_list *ctx_old; ctx_old = ctx_list_pool + context; remove_from_ctx_list(ctx_old); add_to_free_ctxlist(ctx_old); } static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu) { if(mm->context == NO_CONTEXT) { spin_lock(&srmmu_context_spinlock); alloc_context(old_mm, mm); spin_unlock(&srmmu_context_spinlock); srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); } if (sparc_cpu_model == sparc_leon) 
leon_switch_mm(); if (is_hypersparc) hyper_flush_whole_icache(); srmmu_set_context(mm->context); } /* Low level IO area allocation on the SRMMU. */ static inline void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type) { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; unsigned long tmp; physaddr &= PAGE_MASK; pgdp = pgd_offset_k(virt_addr); pmdp = srmmu_pmd_offset(pgdp, virt_addr); ptep = srmmu_pte_offset(pmdp, virt_addr); tmp = (physaddr >> 4) | SRMMU_ET_PTE; /* * I need to test whether this is consistent over all * sun4m's. The bus_type represents the upper 4 bits of * 36-bit physical address on the I/O space lines... */ tmp |= (bus_type << 28); tmp |= SRMMU_PRIV; __flush_page_to_ram(virt_addr); srmmu_set_pte(ptep, __pte(tmp)); } static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, unsigned long xva, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_mapioaddr(xpa, xva, bus); xva += PAGE_SIZE; xpa += PAGE_SIZE; } flush_tlb_all(); } static inline void srmmu_unmapioaddr(unsigned long virt_addr) { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(virt_addr); pmdp = srmmu_pmd_offset(pgdp, virt_addr); ptep = srmmu_pte_offset(pmdp, virt_addr); /* No need to flush uncacheable page. */ srmmu_pte_clear(ptep); } static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_unmapioaddr(virt_addr); virt_addr += PAGE_SIZE; } flush_tlb_all(); } /* * On the SRMMU we do not have the problems with limited tlb entries * for mapping kernel pages, so we just take things from the free page * pool. As a side effect we are putting a little too much pressure * on the gfp() subsystem. This setup also makes the logic of the * iommu mapping code a lot easier as we can transparently handle * mappings on the kernel stack without any special code as we did * need on the sun4c. 
*/ static struct thread_info *srmmu_alloc_thread_info_node(int node) { struct thread_info *ret; ret = (struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER); #ifdef CONFIG_DEBUG_STACK_USAGE if (ret) memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER); #endif /* DEBUG_STACK_USAGE */ return ret; } static void srmmu_free_thread_info(struct thread_info *ti) { free_pages((unsigned long)ti, THREAD_INFO_ORDER); } /* tsunami.S */ extern void tsunami_flush_cache_all(void); extern void tsunami_flush_cache_mm(struct mm_struct *mm); extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void tsunami_flush_page_to_ram(unsigned long page); extern void tsunami_flush_page_for_dma(unsigned long page); extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void tsunami_flush_tlb_all(void); extern void tsunami_flush_tlb_mm(struct mm_struct *mm); extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void tsunami_setup_blockops(void); /* * Workaround, until we find what's going on with Swift. When low on memory, * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find * out it is already in page tables/ fault again on the same instruction. * I really don't understand it, have checked it and contexts * are right, flush_tlb_all is done as well, and it faults again... * Strange. -jj * * The following code is a deadwood that may be necessary when * we start to make precise page flushes again. 
--zaitcev */ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep) { #if 0 static unsigned long last; unsigned int val; /* unsigned int n; */ if (address == last) { val = srmmu_hwprobe(address); if (val != 0 && pte_val(*ptep) != val) { printk("swift_update_mmu_cache: " "addr %lx put %08x probed %08x from %p\n", address, pte_val(*ptep), val, __builtin_return_address(0)); srmmu_flush_whole_tlb(); } } last = address; #endif } /* swift.S */ extern void swift_flush_cache_all(void); extern void swift_flush_cache_mm(struct mm_struct *mm); extern void swift_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void swift_flush_page_to_ram(unsigned long page); extern void swift_flush_page_for_dma(unsigned long page); extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void swift_flush_tlb_all(void); extern void swift_flush_tlb_mm(struct mm_struct *mm); extern void swift_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); #if 0 /* P3: deadwood to debug precise flushes on Swift. */ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cctx, ctx1; page &= PAGE_MASK; if ((ctx1 = vma->vm_mm->context) != -1) { cctx = srmmu_get_context(); /* Is context # ever different from current context? P3 */ if (cctx != ctx1) { printk("flush ctx %02x curr %02x\n", ctx1, cctx); srmmu_set_context(ctx1); swift_flush_page(page); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PROBE)); srmmu_set_context(cctx); } else { /* Rm. prot. bits from virt. c. 
*/ /* swift_flush_cache_all(); */ /* swift_flush_cache_page(vma, page); */ swift_flush_page(page); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PROBE)); /* same as above: srmmu_flush_tlb_page() */ } } } #endif /* * The following are all MBUS based SRMMU modules, and therefore could * be found in a multiprocessor configuration. On the whole, these * chips seems to be much more touchy about DVMA and page tables * with respect to cache coherency. */ /* Cypress flushes. */ static void cypress_flush_cache_all(void) { volatile unsigned long cypress_sucks; unsigned long faddr, tagval; flush_user_windows(); for(faddr = 0; faddr < 0x10000; faddr += 0x20) { __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : "=r" (tagval) : "r" (faddr), "r" (0x40000), "i" (ASI_M_DATAC_TAG)); /* If modified and valid, kick it. */ if((tagval & 0x60) == 0x60) cypress_sucks = *(unsigned long *)(0xf0020000 + faddr); } } static void cypress_flush_cache_mm(struct mm_struct *mm) { register unsigned long a, b, c, d, e, f, g; unsigned long flags, faddr; int octx; FLUSH_BEGIN(mm) flush_user_windows(); local_irq_save(flags); octx = srmmu_get_context(); srmmu_set_context(mm->context); a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; faddr = (0x10000 - 0x100); goto inside; do { faddr -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (faddr), "i" (ASI_M_FLUSH_CTX), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while(faddr); srmmu_set_context(octx); local_irq_restore(flags); FLUSH_END } static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; register unsigned long a, b, c, d, e, f, g; unsigned long flags, faddr; int octx; 
FLUSH_BEGIN(mm) flush_user_windows(); local_irq_save(flags); octx = srmmu_get_context(); srmmu_set_context(mm->context); a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; start &= SRMMU_REAL_PMD_MASK; while(start < end) { faddr = (start + (0x10000 - 0x100)); goto inside; do { faddr -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (faddr), "i" (ASI_M_FLUSH_SEG), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while (faddr != start); start += SRMMU_REAL_PMD_SIZE; } srmmu_set_context(octx); local_irq_restore(flags); FLUSH_END } static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { register unsigned long a, b, c, d, e, f, g; struct mm_struct *mm = vma->vm_mm; unsigned long flags, line; int octx; FLUSH_BEGIN(mm) flush_user_windows(); local_irq_save(flags); octx = srmmu_get_context(); srmmu_set_context(mm->context); a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; page &= PAGE_MASK; line = (page + PAGE_SIZE) - 0x100; goto inside; do { line -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (line), "i" (ASI_M_FLUSH_PAGE), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while(line != page); srmmu_set_context(octx); local_irq_restore(flags); FLUSH_END } /* Cypress is copy-back, at least that is how we configure it. 
 */
/*
 * Write a page's dirty lines back to RAM (ASI_M_FLUSH_PAGE), walking the
 * page backwards in 256-byte chunks with an 8-way unrolled store loop.
 * No context switch is needed here, unlike the vma-based flushes above.
 */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	/* Line offsets for the unrolled flush. */
	a = 0x20; b = 0x40; c = 0x60; d = 0x80;
	e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t"
				     : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/*
 * Cypress has unified L2 VIPT, from which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

/* Cypress TLB flushes: all are built on whole-TLB or probe flushes. */
static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

/*
 * Flush all TLB entries for MM's context: temporarily load MM's context
 * into the context register, probe-flush the whole context (0x300), then
 * restore the previous context — all inside one asm so nothing can
 * intervene.  %g5 holds the saved context register.
 */
static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/*
 * Probe-flush every pgdir-sized region covering [start, end) for MM's
 * context; the asm loop counts SIZE down by SRMMU_PGDIR_SIZE per probe.
 * (start | 0x200) selects the region-level probe — see the SRMMU spec.
 */
static void cypress_flush_tlb_range(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda	[%0] %5, %%g5\n\t"
		"sta	%1, [%0] %5\n"
		"1:\n\t"
		"subcc	%3, %4, %3\n\t"
		"bne	1b\n\t"
		" sta	%%g0, [%2 + %3] %6\n\t"
		"sta	%%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

/* Probe-flush the TLB entry for one page of MM's context. */
static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%1, [%0] %3\n\t"
	"sta	%%g0, [%2] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern
void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

/* Fatal: a boot-time page-table allocation failed — report and halt in PROM. */
static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

/*
 * Populate pgd/pmd/pte skeleton tables for [start, end) before the MMU
 * tables are live; all writes go through __nocache_fix() because the
 * nocache area is not yet mapped at its final address.
 */
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		/* Guard against address wrap-around at the top of the 32-bit space. */
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * Same skeleton population as above, but used once the tables are live
 * so plain pointers are used instead of __nocache_fix().
 */
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}
    
		/*
		 * A red snapper, see what it really is: probe one page short
		 * of the next pmd/pgd boundary — if the PROM returns the same
		 * pte, the whole region is one large hardware mapping.
		 */
		what = 0;
    
		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}
    
		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}
    
		pgdp = pgd_offset_k(start);
		if(what == 2) {
			/* Whole pgdir is one large pte: install it at pgd level. */
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	/* phys_base >> 4: SRMMU ptes hold the physical page number shifted by 4. */
	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
*/ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) { unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK); unsigned long vstart = (vbase & SRMMU_PGDIR_MASK); unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes); /* Map "low" memory only */ const unsigned long min_vaddr = PAGE_OFFSET; const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM; if (vstart < min_vaddr || vstart >= max_vaddr) return vstart; if (vend > max_vaddr || vend < min_vaddr) vend = max_vaddr; while(vstart < vend) { do_large_mapping(vstart, pstart); vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; } return vstart; } static inline void memprobe_error(char *msg) { prom_printf(msg); prom_printf("Halting now...\n"); prom_halt(); } static inline void map_kernel(void) { int i; if (phys_base > 0) { do_large_mapping(PAGE_OFFSET, phys_base); } for (i = 0; sp_banks[i].num_bytes != 0; i++) { map_spbank((unsigned long)__va(sp_banks[i].base_addr), i); } BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE); } /* Paging initialization on the Sparc Reference MMU. */ extern void sparc_context_init(int); void (*poke_srmmu)(void) __cpuinitdata = NULL; extern unsigned long bootmem_init(unsigned long *pages_avail); void __init srmmu_paging_init(void) { int i; phandle cpunode; char node_str[128]; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned long pages_avail; sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ if (sparc_cpu_model == sun4d) num_contexts = 65536; /* We know it is Viking */ else { /* Find the number of contexts on the srmmu. 
		 */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	/* Take over the PROM's mappings of its own VM range. */
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	/* Every context initially points at the kernel swapper page directory. */
	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	/* Cache the pte table backing the persistent-kmap area. */
	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

/* Report MMU details for /proc/cpuinfo. */
static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m, 
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

/* SRMMU needs no per-pte MMU-cache update; hook intentionally empty. */
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

/*
 * Tear down an mm's MMU context: flush its cache/TLB state, repoint its
 * context table entry back at swapper, and return the context number to
 * the allocator under srmmu_context_spinlock.
 */
static void srmmu_destroy_context(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types.
*/ static void __init srmmu_is_bad(void) { prom_printf("Could not determine SRMMU chip type.\n"); prom_halt(); } static void __init init_vac_layout(void) { phandle nd; int cache_lines; char node_str[128]; #ifdef CONFIG_SMP int cpu = 0; unsigned long max_size = 0; unsigned long min_line_size = 0x10000000; #endif nd = prom_getchild(prom_root_node); while((nd = prom_getsibling(nd)) != 0) { prom_getstring(nd, "device_type", node_str, sizeof(node_str)); if(!strcmp(node_str, "cpu")) { vac_line_size = prom_getint(nd, "cache-line-size"); if (vac_line_size == -1) { prom_printf("can't determine cache-line-size, " "halting.\n"); prom_halt(); } cache_lines = prom_getint(nd, "cache-nlines"); if (cache_lines == -1) { prom_printf("can't determine cache-nlines, halting.\n"); prom_halt(); } vac_cache_size = cache_lines * vac_line_size; #ifdef CONFIG_SMP if(vac_cache_size > max_size) max_size = vac_cache_size; if(vac_line_size < min_line_size) min_line_size = vac_line_size; //FIXME: cpus not contiguous!! cpu++; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) break; #else break; #endif } } if(nd == 0) { prom_printf("No CPU nodes found, halting.\n"); prom_halt(); } #ifdef CONFIG_SMP vac_cache_size = max_size; vac_line_size = min_line_size; #endif printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n", (int)vac_cache_size, (int)vac_line_size); } static void __cpuinit poke_hypersparc(void) { volatile unsigned long clear; unsigned long mreg = srmmu_get_mmureg(); hyper_flush_unconditional_combined(); mreg &= ~(HYPERSPARC_CWENABLE); mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE); mreg |= (HYPERSPARC_CMODE); srmmu_set_mmureg(mreg); #if 0 /* XXX I think this is bad news... 
       * -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	/* Dummy reads clear any pending fault address/status. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

/* Wire up the HyperSparc cache/TLB operations via BTFIXUP. */
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

/*
 * Enable the Cypress cache.  If the cache is off, invalidate every tag
 * first; if it is already on, write back all dirty+valid lines before
 * (re)enabling copy-back mode.  The faddr/fstatus reads clear latched
 * fault state around the sequence.
 */
static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		/* NOTE(review): step of 20 (decimal) looks odd next to the
		 * 0x20 used elsewhere — preserved as-is, verify upstream. */
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

/* Wire up the Cypress cache/TLB operations shared by the 604 and 605. */
static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

/* Uniprocessor Cypress variant. */
static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

/* MP Cypress variant; revisions 0xe/0xd carry known hardware bugs. */
static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

/* Enable the Swift I/D caches and disable its broken branch folding. */
static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, if can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

#define SWIFT_MASKID_ADDR  0x10003018

/*
 * Identify the Swift revision (read via ASI bypass from the mask-ID
 * register), record its known hardware bugs, and wire up the Swift
 * cache/TLB operations via BTFIXUP.
 */
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	};

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
*/ poke_srmmu = poke_swift; } static void turbosparc_flush_cache_all(void) { flush_user_windows(); turbosparc_idflash_clear(); } static void turbosparc_flush_cache_mm(struct mm_struct *mm) { FLUSH_BEGIN(mm) flush_user_windows(); turbosparc_idflash_clear(); FLUSH_END } static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { FLUSH_BEGIN(vma->vm_mm) flush_user_windows(); turbosparc_idflash_clear(); FLUSH_END } static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { FLUSH_BEGIN(vma->vm_mm) flush_user_windows(); if (vma->vm_flags & VM_EXEC) turbosparc_flush_icache(); turbosparc_flush_dcache(); FLUSH_END } /* TurboSparc is copy-back, if we turn it on, but this does not work. */ static void turbosparc_flush_page_to_ram(unsigned long page) { #ifdef TURBOSPARC_WRITEBACK volatile unsigned long clear; if (srmmu_hwprobe(page)) turbosparc_flush_page_cache(page); clear = srmmu_get_fstatus(); #endif } static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) { } static void turbosparc_flush_page_for_dma(unsigned long page) { turbosparc_flush_dcache(); } static void turbosparc_flush_tlb_all(void) { srmmu_flush_whole_tlb(); } static void turbosparc_flush_tlb_mm(struct mm_struct *mm) { FLUSH_BEGIN(mm) srmmu_flush_whole_tlb(); FLUSH_END } static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { FLUSH_BEGIN(vma->vm_mm) srmmu_flush_whole_tlb(); FLUSH_END } static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { FLUSH_BEGIN(vma->vm_mm) srmmu_flush_whole_tlb(); FLUSH_END } static void __cpuinit poke_turbosparc(void) { unsigned long mreg = srmmu_get_mmureg(); unsigned long ccreg; /* Clear any crap from the cache or else... 
	 */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 |
		   TURBOSPARC_WTENABLE);	/* Write-back D-cache, emulate VLSI
						 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
						/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);		/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */

	srmmu_set_mmureg(mreg);
}

/* Wire up the TurboSparc cache/TLB operations via BTFIXUP. */
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

/* Enable the Tsunami I/D caches after flushing them. */
static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

/* Wire up the Tsunami cache/TLB operations via BTFIXUP. */
static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

/*
 * Enable the Viking caches and, when present, the MXCC external cache
 * controller.  On MP systems, the first CPU through here also disables
 * mixed-command mode for the others.
 */
static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if(viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC.
		 */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

/*
 * Wire up the Viking cache/TLB operations via BTFIXUP, distinguishing
 * plain Viking (with the DMA snoop bug workaround) from Viking/MXCC.
 */
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if(mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

		/*
		 * We need this to make sure old viking takes no hits
		 * on it's cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}

#ifdef CONFIG_SPARC_LEON

/* LEON has no per-chip MMU poke sequence. */
void __init poke_leonsparc(void)
{
}

/* Wire up the LEON cache/TLB operations via BTFIXUP. */
void __init init_leon(void)
{

	srmmu_name = "LEON";

	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
			BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
			BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);

	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if(mod_typ == 1) {
		switch(mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		};
		return;
	}
	
	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if(psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}
		
		init_swift();
		return;
	}

	/* Now the Viking family of srmmu.
*/ if(psr_typ == 4 && ((psr_vers == 0) || ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { init_viking(); return; } /* Finally the Tsunami. */ if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { init_tsunami(); return; } /* Oh well */ srmmu_is_bad(); } /* don't laugh, static pagetables */ static void srmmu_check_pgt_cache(int low, int high) { } extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, tsetup_mmu_patchme, rtrap_mmu_patchme; extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk, tsetup_srmmu_stackchk, srmmu_rett_stackchk; extern unsigned long srmmu_fault; #define PATCH_BRANCH(insn, dest) do { \ iaddr = &(insn); \ daddr = &(dest); \ *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \ } while(0) static void __init patch_window_trap_handlers(void) { unsigned long *iaddr, *daddr; PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk); PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk); PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk); PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk); PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault); PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault); PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault); } #ifdef CONFIG_SMP /* Local cross-calls. 
*/ static void smp_flush_page_for_dma(unsigned long page) { xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); local_flush_page_for_dma(page); } #endif static pte_t srmmu_pgoff_to_pte(unsigned long pgoff) { return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE); } static unsigned long srmmu_pte_to_pgoff(pte_t pte) { return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT; } static pgprot_t srmmu_pgprot_noncached(pgprot_t prot) { prot &= ~__pgprot(SRMMU_CACHE); return prot; } /* Load up routines and constants for sun4m and sun4d mmu */ void __init ld_mmu_srmmu(void) { extern void ld_mmu_iommu(void); extern void ld_mmu_iounit(void); extern void ___xchg32_sun4md(void); BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); /* Functions */ BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM); #ifndef CONFIG_SMP BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); #endif BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(pmd_bad, 
srmmu_pmd_bad, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM); BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM); BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); BTFIXUPSET_HALF(pte_filei, SRMMU_FILE); BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, 
BTFIXUPCALL_ORINT(SRMMU_DIRTY)); BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM); get_srmmu_type(); patch_window_trap_handlers(); #ifdef CONFIG_SMP /* El switcheroo... 
*/ BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram); BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); if (sparc_cpu_model != sun4d && sparc_cpu_model != sparc_leon) { BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); } BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); if (poke_srmmu == poke_viking) { /* Avoid unnecessary cross calls. 
*/ BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); } #endif if (sparc_cpu_model == sun4d) ld_mmu_iounit(); else ld_mmu_iommu(); #ifdef CONFIG_SMP if (sparc_cpu_model == sun4d) sun4d_init_smp(); else if (sparc_cpu_model == sparc_leon) leon_init_smp(); else sun4m_init_smp(); #endif }
gpl-2.0
NamelessRom/android_kernel_oppo_n3
arch/arm/mach-msm/ocmem.c
660
21599
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/rbtree.h> #include <linux/genalloc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <mach/ocmem_priv.h> #define OCMEM_REGION_CTL_BASE 0xFDD0003C #define OCMEM_REGION_CTL_SIZE 0xFD0 #define GRAPHICS_REGION_CTL (0x17F000) struct ocmem_partition { const char *name; int id; unsigned long p_start; unsigned long p_size; unsigned long p_min; unsigned int p_tail; }; struct ocmem_zone zones[OCMEM_CLIENT_MAX]; struct ocmem_zone *get_zone(unsigned id) { if (id < OCMEM_GRAPHICS || id >= OCMEM_CLIENT_MAX) return NULL; else return &zones[id]; } static struct ocmem_plat_data *ocmem_pdata; #define CLIENT_NAME_MAX 10 /* Must be in sync with enum ocmem_client */ static const char *client_names[OCMEM_CLIENT_MAX] = { "graphics", "video", "camera", "hp_audio", "voice", "lp_audio", "sensors", "other_os", }; /* Must be in sync with enum ocmem_zstat_item */ static const char *zstat_names[NR_OCMEM_ZSTAT_ITEMS] = { "Allocation requests", "Synchronous allocations", "Ranged allocations", "Asynchronous allocations", "Allocation failures", "Allocations grown", "Allocations freed", "Allocations shrunk", "OCMEM maps", "Map failures", "OCMEM unmaps", "Unmap failures", "Transfers to OCMEM", "Transfers to DDR", "Transfer failures", "Evictions", 
"Restorations", "Dump requests", "Dump completed", }; struct ocmem_quota_table { const char *name; int id; unsigned long start; unsigned long size; unsigned long min; unsigned int tail; }; /* This static table will go away with device tree support */ static struct ocmem_quota_table qt[OCMEM_CLIENT_MAX] = { /* name, id, start, size, min, tail */ { "graphics", OCMEM_GRAPHICS, 0x0, 0x100000, 0x80000, 0}, { "video", OCMEM_VIDEO, 0x100000, 0x80000, 0x55000, 1}, { "camera", OCMEM_CAMERA, 0x0, 0x0, 0x0, 0}, { "voice", OCMEM_VOICE, 0x0, 0x0, 0x0, 0 }, { "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0}, { "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0}, { "other_os", OCMEM_OTHER_OS, 0x120000, 0x20000, 0x20000, 0}, { "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0}, }; static inline int get_id(const char *name) { int i = 0; for (i = 0 ; i < OCMEM_CLIENT_MAX; i++) { if (strncmp(name, client_names[i], CLIENT_NAME_MAX) == 0) return i; } return -EINVAL; } int check_id(int id) { return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS); } const char *get_name(int id) { if (!check_id(id)) return "Unknown"; return client_names[id]; } inline unsigned long phys_to_offset(unsigned long addr) { if (!ocmem_pdata) return 0; if (addr < ocmem_pdata->base || addr > (ocmem_pdata->base + ocmem_pdata->size)) return 0; return addr - ocmem_pdata->base; } inline unsigned long offset_to_phys(unsigned long offset) { if (!ocmem_pdata) return 0; if (offset > ocmem_pdata->size) return 0; return offset + ocmem_pdata->base; } inline int zone_active(int id) { struct ocmem_zone *z = get_zone(id); if (z) return z->active == true ? 
1 : 0; else return 0; } inline void inc_ocmem_stat(struct ocmem_zone *z, enum ocmem_zstat_item item) { if (!z) return; atomic_long_inc(&z->z_stat[item]); } inline unsigned long get_ocmem_stat(struct ocmem_zone *z, enum ocmem_zstat_item item) { if (!z) return 0; else return atomic_long_read(&z->z_stat[item]); } static struct ocmem_plat_data *parse_static_config(struct platform_device *pdev) { struct ocmem_plat_data *pdata = NULL; struct ocmem_partition *parts = NULL; struct device *dev = &pdev->dev; unsigned nr_parts = 0; int i; int j; pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data), GFP_KERNEL); if (!pdata) { dev_err(dev, "Unable to allocate memory for" " platform data\n"); return NULL; } for (i = 0 ; i < ARRAY_SIZE(qt); i++) if (qt[i].size != 0x0) nr_parts++; if (nr_parts == 0x0) { dev_err(dev, "No valid ocmem partitions\n"); return NULL; } else dev_info(dev, "Total partitions = %d\n", nr_parts); parts = devm_kzalloc(dev, sizeof(struct ocmem_partition) * nr_parts, GFP_KERNEL); if (!parts) { dev_err(dev, "Unable to allocate memory for" " partition data\n"); return NULL; } for (i = 0, j = 0; i < ARRAY_SIZE(qt); i++) { if (qt[i].size == 0x0) { dev_dbg(dev, "Skipping creation of pool for %s\n", qt[i].name); continue; } parts[j].id = qt[i].id; parts[j].p_size = qt[i].size; parts[j].p_start = qt[i].start; parts[j].p_min = qt[i].min; parts[j].p_tail = qt[i].tail; j++; } BUG_ON(j != nr_parts); pdata->nr_parts = nr_parts; pdata->parts = parts; pdata->base = OCMEM_PHYS_BASE; pdata->size = OCMEM_PHYS_SIZE; return pdata; } int __devinit of_ocmem_parse_regions(struct device *dev, struct ocmem_partition **part) { const char *name; struct device_node *child = NULL; int nr_parts = 0; int i = 0; int rc = 0; int id = -1; /*Compute total partitions */ for_each_child_of_node(dev->of_node, child) nr_parts++; if (nr_parts == 0) return 0; *part = devm_kzalloc(dev, nr_parts * sizeof(**part), GFP_KERNEL); if (!*part) return -ENOMEM; for_each_child_of_node(dev->of_node, child) { 
const u32 *addr; u32 min; u64 size; u64 p_start; addr = of_get_address(child, 0, &size, NULL); if (!addr) { dev_err(dev, "Invalid addr for partition %d, ignored\n", i); continue; } rc = of_property_read_u32(child, "qcom,ocmem-part-min", &min); if (rc) { dev_err(dev, "No min for partition %d, ignored\n", i); continue; } rc = of_property_read_string(child, "qcom,ocmem-part-name", &name); if (rc) { dev_err(dev, "No name for partition %d, ignored\n", i); continue; } id = get_id(name); if (id < 0) { dev_err(dev, "Ignoring invalid partition %s\n", name); continue; } p_start = of_translate_address(child, addr); if (p_start == OF_BAD_ADDR) { dev_err(dev, "Invalid offset for partition %d\n", i); continue; } (*part)[i].p_start = p_start; (*part)[i].p_size = size; (*part)[i].id = id; (*part)[i].name = name; (*part)[i].p_min = min; (*part)[i].p_tail = of_property_read_bool(child, "tail"); i++; } return i; } #if defined(CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL) static int parse_power_ctrl_config(struct ocmem_plat_data *pdata, struct device_node *node) { pdata->rpm_pwr_ctrl = false; pdata->rpm_rsc_type = ~0x0; return 0; } #else static int parse_power_ctrl_config(struct ocmem_plat_data *pdata, struct device_node *node) { unsigned rsc_type = ~0x0; pdata->rpm_pwr_ctrl = false; if (of_property_read_u32(node, "qcom,resource-type", &rsc_type)) return -EINVAL; pdata->rpm_pwr_ctrl = true; pdata->rpm_rsc_type = rsc_type; return 0; } #endif /* CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL */ /* Core Clock Operations */ int ocmem_enable_core_clock(void) { int ret; ret = clk_prepare_enable(ocmem_pdata->core_clk); if (ret) { pr_err("ocmem: Failed to enable core clock\n"); return ret; } pr_debug("ocmem: Enabled core clock\n"); return 0; } void ocmem_disable_core_clock(void) { clk_disable_unprepare(ocmem_pdata->core_clk); pr_debug("ocmem: Disabled core clock\n"); } /* Branch Clock Operations */ int ocmem_enable_iface_clock(void) { int ret; if (!ocmem_pdata->iface_clk) return 0; ret = 
clk_prepare_enable(ocmem_pdata->iface_clk); if (ret) { pr_err("ocmem: Failed to disable iface clock\n"); return ret; } pr_debug("ocmem: Enabled iface clock\n"); return 0; } void ocmem_disable_iface_clock(void) { if (!ocmem_pdata->iface_clk) return; clk_disable_unprepare(ocmem_pdata->iface_clk); pr_debug("ocmem: Disabled iface clock\n"); } static struct ocmem_plat_data * __devinit parse_dt_config (struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; struct ocmem_plat_data *pdata = NULL; struct ocmem_partition *parts = NULL; struct resource *ocmem_irq; struct resource *dm_irq; struct resource *ocmem_mem; struct resource *reg_base; struct resource *br_base; struct resource *dm_base; struct resource *ocmem_mem_io; unsigned nr_parts = 0; unsigned nr_regions = 0; unsigned nr_macros = 0; pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data), GFP_KERNEL); if (!pdata) { dev_err(dev, "Unable to allocate memory for platform data\n"); return NULL; } ocmem_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ocmem_physical"); if (!ocmem_mem) { dev_err(dev, "No OCMEM memory resource\n"); return NULL; } ocmem_mem_io = request_mem_region(ocmem_mem->start, resource_size(ocmem_mem), pdev->name); if (!ocmem_mem_io) { dev_err(dev, "Could not claim OCMEM memory\n"); return NULL; } pdata->base = ocmem_mem->start; pdata->size = resource_size(ocmem_mem); pdata->vbase = devm_ioremap_nocache(dev, ocmem_mem->start, resource_size(ocmem_mem)); if (!pdata->vbase) { dev_err(dev, "Could not ioremap ocmem memory\n"); return NULL; } reg_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ocmem_ctrl_physical"); if (!reg_base) { dev_err(dev, "No OCMEM register resource\n"); return NULL; } pdata->reg_base = devm_ioremap_nocache(dev, reg_base->start, resource_size(reg_base)); if (!pdata->reg_base) { dev_err(dev, "Could not ioremap register map\n"); return NULL; } br_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"br_ctrl_physical"); if (!br_base) { dev_err(dev, "No OCMEM BR resource\n"); return NULL; } pdata->br_base = devm_ioremap_nocache(dev, br_base->start, resource_size(br_base)); if (!pdata->br_base) { dev_err(dev, "Could not ioremap BR resource\n"); return NULL; } dm_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dm_ctrl_physical"); if (!dm_base) { dev_err(dev, "No OCMEM DM resource\n"); return NULL; } pdata->dm_base = devm_ioremap_nocache(dev, dm_base->start, resource_size(dm_base)); if (!pdata->dm_base) { dev_err(dev, "Could not ioremap DM resource\n"); return NULL; } ocmem_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ocmem_irq"); if (!ocmem_irq) { dev_err(dev, "No OCMEM IRQ resource\n"); return NULL; } dm_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "dm_irq"); if (!dm_irq) { dev_err(dev, "No DM IRQ resource\n"); return NULL; } if (of_property_read_u32(node, "qcom,ocmem-num-regions", &nr_regions)) { dev_err(dev, "No OCMEM memory regions specified\n"); } if (nr_regions == 0) { dev_err(dev, "No hardware memory regions found\n"); return NULL; } if (of_property_read_u32(node, "qcom,ocmem-num-macros", &nr_macros)) { dev_err(dev, "No OCMEM macros specified\n"); } if (nr_macros == 0) { dev_err(dev, "No hardware macros found\n"); return NULL; } /* Figure out the number of partititons */ nr_parts = of_ocmem_parse_regions(dev, &parts); if (nr_parts <= 0) { dev_err(dev, "No valid OCMEM partitions found\n"); goto pdata_error; } else dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts); if (parse_power_ctrl_config(pdata, node)) { dev_err(dev, "No OCMEM RPM Resource specified\n"); return NULL; } pdata->nr_parts = nr_parts; pdata->parts = parts; pdata->nr_regions = nr_regions; pdata->nr_macros = nr_macros; pdata->ocmem_irq = ocmem_irq->start; pdata->dm_irq = dm_irq->start; return pdata; pdata_error: return NULL; } static int ocmem_zones_show(struct seq_file *f, void *dummy) { unsigned i = 0; for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) 
{ struct ocmem_zone *z = get_zone(i); if (z && z->active == true) seq_printf(f, "zone %s\t:0x%08lx - 0x%08lx (%4ld KB)\n", get_name(z->owner), z->z_start, z->z_end - 1, (z->z_end - z->z_start)/SZ_1K); } return 0; } static int ocmem_zones_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_zones_show, inode->i_private); } static const struct file_operations zones_show_fops = { .open = ocmem_zones_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int ocmem_stats_show(struct seq_file *f, void *dummy) { unsigned i = 0; unsigned j = 0; for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) { struct ocmem_zone *z = get_zone(i); if (z && z->active == true) { seq_printf(f, "zone %s:\n", get_name(z->owner)); for (j = 0 ; j < ARRAY_SIZE(zstat_names); j++) { seq_printf(f, "\t %s: %lu\n", zstat_names[j], get_ocmem_stat(z, j)); } } } return 0; } static int ocmem_stats_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_stats_show, inode->i_private); } static const struct file_operations stats_show_fops = { .open = ocmem_stats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int ocmem_timing_show(struct seq_file *f, void *dummy) { unsigned i = 0; for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) { struct ocmem_zone *z = get_zone(i); if (z && z->active == true) seq_printf(f, "zone %s\t: alloc_delay:[max:%d, min:%d, total:%llu,cnt:%lu] free_delay:[max:%d, min:%d, total:%llu, cnt:%lu]\n", get_name(z->owner), z->max_alloc_time, z->min_alloc_time, z->total_alloc_time, get_ocmem_stat(z, 1), z->max_free_time, z->min_free_time, z->total_free_time, get_ocmem_stat(z, 6)); } return 0; } static int ocmem_timing_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_timing_show, inode->i_private); } static const struct file_operations timing_show_fops = { .open = ocmem_timing_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int 
ocmem_zone_init(struct platform_device *pdev) { int ret = -1; int i = 0; unsigned active_zones = 0; struct ocmem_zone *zone = NULL; struct ocmem_zone_ops *z_ops = NULL; struct device *dev = &pdev->dev; unsigned long start; struct ocmem_plat_data *pdata = NULL; pdata = platform_get_drvdata(pdev); for (i = 0; i < pdata->nr_parts; i++) { struct ocmem_partition *part = &pdata->parts[i]; zone = get_zone(part->id); zone->active = false; dev_dbg(dev, "Partition %d, start %lx, size %lx for %s\n", i, part->p_start, part->p_size, client_names[part->id]); if (part->p_size > pdata->size) { dev_alert(dev, "Quota > ocmem_size for id:%d\n", part->id); continue; } zone->z_pool = gen_pool_create(PAGE_SHIFT, -1); if (!zone->z_pool) { dev_alert(dev, "Creating pool failed for id:%d\n", part->id); return -EBUSY; } start = part->p_start; ret = gen_pool_add(zone->z_pool, start, part->p_size, -1); if (ret < 0) { gen_pool_destroy(zone->z_pool); dev_alert(dev, "Unable to back pool %d with " "buffer:%lx\n", part->id, part->p_size); return -EBUSY; } /* Initialize zone allocators */ z_ops = devm_kzalloc(dev, sizeof(struct ocmem_zone_ops), GFP_KERNEL); if (!z_ops) { pr_alert("ocmem: Unable to allocate memory for" "zone ops:%d\n", i); return -EBUSY; } /* Initialize zone parameters */ zone->z_start = start; zone->z_head = zone->z_start; zone->z_end = start + part->p_size; zone->z_tail = zone->z_end; zone->z_free = part->p_size; zone->owner = part->id; zone->active_regions = 0; zone->max_regions = 0; INIT_LIST_HEAD(&zone->req_list); zone->z_ops = z_ops; zone->max_alloc_time = 0; zone->min_alloc_time = 0xFFFFFFFF; zone->total_alloc_time = 0; zone->max_free_time = 0; zone->min_free_time = 0xFFFFFFFF; zone->total_free_time = 0; if (part->p_tail) { z_ops->allocate = allocate_tail; z_ops->free = free_tail; } else { z_ops->allocate = allocate_head; z_ops->free = free_head; } /* zap the counters */ memset(zone->z_stat, 0 , sizeof(zone->z_stat)); zone->active = true; active_zones++; if (active_zones == 1) 
pr_info("Physical OCMEM zone layout:\n"); pr_info(" zone %s\t: 0x%08lx - 0x%08lx (%4ld KB)\n", client_names[part->id], zone->z_start, zone->z_end - 1, part->p_size/SZ_1K); } if (!debugfs_create_file("zones", S_IRUGO, pdata->debug_node, NULL, &zones_show_fops)) { dev_err(dev, "Unable to create debugfs node for zones\n"); return -EBUSY; } if (!debugfs_create_file("stats", S_IRUGO, pdata->debug_node, NULL, &stats_show_fops)) { dev_err(dev, "Unable to create debugfs node for stats\n"); return -EBUSY; } if (!debugfs_create_file("timing", S_IRUGO, pdata->debug_node, NULL, &timing_show_fops)) { dev_err(dev, "Unable to create debugfs node for timing\n"); return -EBUSY; } dev_dbg(dev, "Total active zones = %d\n", active_zones); return 0; } /* Enable the ocmem graphics mpU as a workaround */ #ifdef CONFIG_MSM_OCMEM_NONSECURE static int ocmem_init_gfx_mpu(struct platform_device *pdev) { int rc; struct device *dev = &pdev->dev; void __iomem *ocmem_region_vbase = NULL; ocmem_region_vbase = devm_ioremap_nocache(dev, OCMEM_REGION_CTL_BASE, OCMEM_REGION_CTL_SIZE); if (!ocmem_region_vbase) return -EBUSY; rc = ocmem_enable_core_clock(); if (rc < 0) return rc; writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC); ocmem_disable_core_clock(); return 0; } #else static int ocmem_init_gfx_mpu(struct platform_device *pdev) { return 0; } #endif /* CONFIG_MSM_OCMEM_NONSECURE */ static int __devinit ocmem_debugfs_init(struct platform_device *pdev) { struct dentry *debug_dir = NULL; struct ocmem_plat_data *pdata = platform_get_drvdata(pdev); debug_dir = debugfs_create_dir("ocmem", NULL); if (!debug_dir || IS_ERR(debug_dir)) { pr_err("ocmem: Unable to create debugfs root\n"); return PTR_ERR(debug_dir); } pdata->debug_node = debug_dir; return 0; } static void __devexit ocmem_debugfs_exit(struct platform_device *pdev) { struct ocmem_plat_data *pdata = platform_get_drvdata(pdev); debugfs_remove_recursive(pdata->debug_node); } static int __devinit msm_ocmem_probe(struct platform_device 
*pdev) { struct device *dev = &pdev->dev; struct clk *ocmem_core_clk = NULL; struct clk *ocmem_iface_clk = NULL; int rc; if (!pdev->dev.of_node) { dev_info(dev, "Missing Configuration in Device Tree\n"); ocmem_pdata = parse_static_config(pdev); } else { ocmem_pdata = parse_dt_config(pdev); } /* Check if we have some configuration data to start */ if (!ocmem_pdata) return -ENODEV; /* Sanity Checks */ BUG_ON(!IS_ALIGNED(ocmem_pdata->size, PAGE_SIZE)); BUG_ON(!IS_ALIGNED(ocmem_pdata->base, PAGE_SIZE)); dev_info(dev, "OCMEM Virtual addr %p\n", ocmem_pdata->vbase); ocmem_core_clk = devm_clk_get(dev, "core_clk"); if (IS_ERR(ocmem_core_clk)) { dev_err(dev, "Unable to get the core clock\n"); return PTR_ERR(ocmem_core_clk); } /* The core clock is synchronous with graphics */ if (clk_set_rate(ocmem_core_clk, 1000) < 0) { dev_err(dev, "Set rate failed on the core clock\n"); return -EBUSY; } ocmem_iface_clk = devm_clk_get(dev, "iface_clk"); if (IS_ERR_OR_NULL(ocmem_iface_clk)) ocmem_iface_clk = NULL; ocmem_pdata->core_clk = ocmem_core_clk; ocmem_pdata->iface_clk = ocmem_iface_clk; platform_set_drvdata(pdev, ocmem_pdata); rc = ocmem_enable_core_clock(); if (rc < 0) goto core_clk_fail; rc = ocmem_enable_iface_clock(); if (rc < 0) goto iface_clk_fail; /* Parameter to be updated based on TZ */ /* Allow the OCMEM CSR to be programmed */ if (ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID)) { ocmem_disable_iface_clock(); ocmem_disable_core_clock(); return -EBUSY; } ocmem_disable_iface_clock(); ocmem_disable_core_clock(); if (ocmem_debugfs_init(pdev)) dev_err(dev, "ocmem: No debugfs node available\n"); if (ocmem_core_init(pdev)) return -EBUSY; if (ocmem_zone_init(pdev)) return -EBUSY; if (ocmem_notifier_init()) return -EBUSY; if (ocmem_sched_init(pdev)) return -EBUSY; if (ocmem_rdm_init(pdev)) return -EBUSY; if (ocmem_init_gfx_mpu(pdev)) { dev_err(dev, "Unable to initialize Graphics mPU\n"); return -EBUSY; } dev_dbg(dev, "initialized successfully\n"); return 0; iface_clk_fail: 
ocmem_disable_core_clock(); core_clk_fail: return rc; } static int __devexit msm_ocmem_remove(struct platform_device *pdev) { ocmem_debugfs_exit(pdev); return 0; } static struct of_device_id msm_ocmem_dt_match[] = { { .compatible = "qcom,msm-ocmem", }, {} }; static struct platform_driver msm_ocmem_driver = { .probe = msm_ocmem_probe, .remove = __devexit_p(msm_ocmem_remove), .driver = { .name = "msm_ocmem", .owner = THIS_MODULE, .of_match_table = msm_ocmem_dt_match, }, }; static int __init ocmem_init(void) { return platform_driver_register(&msm_ocmem_driver); } subsys_initcall(ocmem_init); static void __exit ocmem_exit(void) { platform_driver_unregister(&msm_ocmem_driver); } module_exit(ocmem_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Support for On-Chip Memory on MSM");
gpl-2.0
hamayun/linux-kvm
fs/exofs/file.c
2452
2353
/* * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh <bharrosh@panasas.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * from * linux/fs/minix/inode.c * Copyright (C) 1991, 1992 Linus Torvalds * * This file is part of exofs. * * exofs is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. Since it is based on ext2, and the only * valid version of GPL for the Linux kernel is version 2, the only valid * version of GPL for exofs is version 2. * * exofs is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with exofs; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "exofs.h" static int exofs_release_file(struct inode *inode, struct file *filp) { return 0; } /* exofs_file_fsync - flush the inode to disk * * Note, in exofs all metadata is written as part of inode, regardless. 
* The writeout is synchronous */ static int exofs_file_fsync(struct file *filp, int datasync) { int ret; ret = sync_inode_metadata(filp->f_mapping->host, 1); return ret; } static int exofs_flush(struct file *file, fl_owner_t id) { int ret = vfs_fsync(file, 0); /* TODO: Flush the OSD target */ return ret; } const struct file_operations exofs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, .open = generic_file_open, .release = exofs_release_file, .fsync = exofs_file_fsync, .flush = exofs_flush, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, }; const struct inode_operations exofs_file_inode_operations = { .setattr = exofs_setattr, };
gpl-2.0
jiankangshiye/linux-2.6.39-9G45
security/integrity/ima/ima_fs.c
3220
9525
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Kylene Hall <kjhall@us.ibm.com> * Reiner Sailer <sailer@us.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_fs.c * implemenents security file system for reporting * current measurement list and IMA statistics */ #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/parser.h> #include "ima.h" static int valid_policy = 1; #define TMPBUFLEN 12 static ssize_t ima_show_htable_value(char __user *buf, size_t count, loff_t *ppos, atomic_long_t *val) { char tmpbuf[TMPBUFLEN]; ssize_t len; len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); } static ssize_t ima_show_htable_violations(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return ima_show_htable_value(buf, count, ppos, &ima_htable.violations); } static const struct file_operations ima_htable_violations_ops = { .read = ima_show_htable_violations, .llseek = generic_file_llseek, }; static ssize_t ima_show_measurements_count(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return ima_show_htable_value(buf, count, ppos, &ima_htable.len); } static const struct file_operations ima_measurements_count_ops = { .read = ima_show_measurements_count, .llseek = generic_file_llseek, }; /* returns pointer to hlist_node */ static void *ima_measurements_start(struct seq_file *m, loff_t *pos) { loff_t l = *pos; struct ima_queue_entry *qe; /* we need a lock since pos could point beyond last element */ rcu_read_lock(); list_for_each_entry_rcu(qe, &ima_measurements, later) { if (!l--) { rcu_read_unlock(); return qe; } } 
rcu_read_unlock(); return NULL; } static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos) { struct ima_queue_entry *qe = v; /* lock protects when reading beyond last element * against concurrent list-extension */ rcu_read_lock(); qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later); rcu_read_unlock(); (*pos)++; return (&qe->later == &ima_measurements) ? NULL : qe; } static void ima_measurements_stop(struct seq_file *m, void *v) { } static void ima_putc(struct seq_file *m, void *data, int datalen) { while (datalen--) seq_putc(m, *(char *)data++); } /* print format: * 32bit-le=pcr# * char[20]=template digest * 32bit-le=template name size * char[n]=template name * eventdata[n]=template specific data */ static int ima_measurements_show(struct seq_file *m, void *v) { /* the list never shrinks, so we don't need a lock here */ struct ima_queue_entry *qe = v; struct ima_template_entry *e; int namelen; u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX; /* get entry */ e = qe->entry; if (e == NULL) return -1; /* * 1st: PCRIndex * PCR used is always the same (config option) in * little-endian format */ ima_putc(m, &pcr, sizeof pcr); /* 2nd: template digest */ ima_putc(m, e->digest, IMA_DIGEST_SIZE); /* 3rd: template name size */ namelen = strlen(e->template_name); ima_putc(m, &namelen, sizeof namelen); /* 4th: template name */ ima_putc(m, (void *)e->template_name, namelen); /* 5th: template specific data */ ima_template_show(m, (struct ima_template_data *)&e->template, IMA_SHOW_BINARY); return 0; } static const struct seq_operations ima_measurments_seqops = { .start = ima_measurements_start, .next = ima_measurements_next, .stop = ima_measurements_stop, .show = ima_measurements_show }; static int ima_measurements_open(struct inode *inode, struct file *file) { return seq_open(file, &ima_measurments_seqops); } static const struct file_operations ima_measurements_ops = { .open = ima_measurements_open, .read = seq_read, .llseek = seq_lseek, .release = 
seq_release, }; static void ima_print_digest(struct seq_file *m, u8 *digest) { int i; for (i = 0; i < IMA_DIGEST_SIZE; i++) seq_printf(m, "%02x", *(digest + i)); } void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show) { struct ima_template_data *entry = e; int namelen; switch (show) { case IMA_SHOW_ASCII: ima_print_digest(m, entry->digest); seq_printf(m, " %s\n", entry->file_name); break; case IMA_SHOW_BINARY: ima_putc(m, entry->digest, IMA_DIGEST_SIZE); namelen = strlen(entry->file_name); ima_putc(m, &namelen, sizeof namelen); ima_putc(m, entry->file_name, namelen); default: break; } } /* print in ascii */ static int ima_ascii_measurements_show(struct seq_file *m, void *v) { /* the list never shrinks, so we don't need a lock here */ struct ima_queue_entry *qe = v; struct ima_template_entry *e; /* get entry */ e = qe->entry; if (e == NULL) return -1; /* 1st: PCR used (config option) */ seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX); /* 2nd: SHA1 template hash */ ima_print_digest(m, e->digest); /* 3th: template name */ seq_printf(m, " %s ", e->template_name); /* 4th: template specific data */ ima_template_show(m, (struct ima_template_data *)&e->template, IMA_SHOW_ASCII); return 0; } static const struct seq_operations ima_ascii_measurements_seqops = { .start = ima_measurements_start, .next = ima_measurements_next, .stop = ima_measurements_stop, .show = ima_ascii_measurements_show }; static int ima_ascii_measurements_open(struct inode *inode, struct file *file) { return seq_open(file, &ima_ascii_measurements_seqops); } static const struct file_operations ima_ascii_measurements_ops = { .open = ima_ascii_measurements_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static ssize_t ima_write_policy(struct file *file, const char __user *buf, size_t datalen, loff_t *ppos) { char *data = NULL; ssize_t result; if (datalen >= PAGE_SIZE) datalen = PAGE_SIZE - 1; /* No partial writes. 
*/ result = -EINVAL; if (*ppos != 0) goto out; result = -ENOMEM; data = kmalloc(datalen + 1, GFP_KERNEL); if (!data) goto out; *(data + datalen) = '\0'; result = -EFAULT; if (copy_from_user(data, buf, datalen)) goto out; result = ima_parse_add_rule(data); out: if (result < 0) valid_policy = 0; kfree(data); return result; } static struct dentry *ima_dir; static struct dentry *binary_runtime_measurements; static struct dentry *ascii_runtime_measurements; static struct dentry *runtime_measurements_count; static struct dentry *violations; static struct dentry *ima_policy; static atomic_t policy_opencount = ATOMIC_INIT(1); /* * ima_open_policy: sequentialize access to the policy file */ int ima_open_policy(struct inode * inode, struct file * filp) { /* No point in being allowed to open it if you aren't going to write */ if (!(filp->f_flags & O_WRONLY)) return -EACCES; if (atomic_dec_and_test(&policy_opencount)) return 0; return -EBUSY; } /* * ima_release_policy - start using the new measure policy rules. * * Initially, ima_measure points to the default policy rules, now * point to the new policy rules, and remove the securityfs policy file, * assuming a valid policy. 
*/ static int ima_release_policy(struct inode *inode, struct file *file) { if (!valid_policy) { ima_delete_rules(); valid_policy = 1; atomic_set(&policy_opencount, 1); return 0; } ima_update_policy(); securityfs_remove(ima_policy); ima_policy = NULL; return 0; } static const struct file_operations ima_measure_policy_ops = { .open = ima_open_policy, .write = ima_write_policy, .release = ima_release_policy, .llseek = generic_file_llseek, }; int __init ima_fs_init(void) { ima_dir = securityfs_create_dir("ima", NULL); if (IS_ERR(ima_dir)) return -1; binary_runtime_measurements = securityfs_create_file("binary_runtime_measurements", S_IRUSR | S_IRGRP, ima_dir, NULL, &ima_measurements_ops); if (IS_ERR(binary_runtime_measurements)) goto out; ascii_runtime_measurements = securityfs_create_file("ascii_runtime_measurements", S_IRUSR | S_IRGRP, ima_dir, NULL, &ima_ascii_measurements_ops); if (IS_ERR(ascii_runtime_measurements)) goto out; runtime_measurements_count = securityfs_create_file("runtime_measurements_count", S_IRUSR | S_IRGRP, ima_dir, NULL, &ima_measurements_count_ops); if (IS_ERR(runtime_measurements_count)) goto out; violations = securityfs_create_file("violations", S_IRUSR | S_IRGRP, ima_dir, NULL, &ima_htable_violations_ops); if (IS_ERR(violations)) goto out; ima_policy = securityfs_create_file("policy", S_IWUSR, ima_dir, NULL, &ima_measure_policy_ops); if (IS_ERR(ima_policy)) goto out; return 0; out: securityfs_remove(runtime_measurements_count); securityfs_remove(ascii_runtime_measurements); securityfs_remove(binary_runtime_measurements); securityfs_remove(ima_dir); securityfs_remove(ima_policy); return -1; } void __exit ima_fs_cleanup(void) { securityfs_remove(violations); securityfs_remove(runtime_measurements_count); securityfs_remove(ascii_runtime_measurements); securityfs_remove(binary_runtime_measurements); securityfs_remove(ima_dir); securityfs_remove(ima_policy); }
gpl-2.0
Perferom/android_kernel_lge_msm7x27-3.0.x
fs/omfs/inode.c
3220
13606
/* * Optimized MPEG FS - inode and super operations. * Copyright (C) 2006 Bob Copeland <me@bobcopeland.com> * Released under GPL v2. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/vfs.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/vmalloc.h> #include <linux/writeback.h> #include <linux/crc-itu-t.h> #include "omfs.h" MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>"); MODULE_DESCRIPTION("OMFS (ReplayTV/Karma) Filesystem for Linux"); MODULE_LICENSE("GPL"); struct buffer_head *omfs_bread(struct super_block *sb, sector_t block) { struct omfs_sb_info *sbi = OMFS_SB(sb); if (block >= sbi->s_num_blocks) return NULL; return sb_bread(sb, clus_to_blk(sbi, block)); } struct inode *omfs_new_inode(struct inode *dir, int mode) { struct inode *inode; u64 new_block; int err; int len; struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb); inode = new_inode(dir->i_sb); if (!inode) return ERR_PTR(-ENOMEM); err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors, &new_block, &len); if (err) goto fail; inode->i_ino = new_block; inode_init_owner(inode, NULL, mode); inode->i_mapping->a_ops = &omfs_aops; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { case S_IFDIR: inode->i_op = &omfs_dir_inops; inode->i_fop = &omfs_dir_operations; inode->i_size = sbi->s_sys_blocksize; inc_nlink(inode); break; case S_IFREG: inode->i_op = &omfs_file_inops; inode->i_fop = &omfs_file_operations; inode->i_size = 0; break; } insert_inode_hash(inode); mark_inode_dirty(inode); return inode; fail: make_bad_inode(inode); iput(inode); return ERR_PTR(err); } /* * Update the header checksums for a dirty inode based on its contents. * Caller is expected to hold the buffer head underlying oi and mark it * dirty. 
*/ static void omfs_update_checksums(struct omfs_inode *oi) { int xor, i, ofs = 0, count; u16 crc = 0; unsigned char *ptr = (unsigned char *) oi; count = be32_to_cpu(oi->i_head.h_body_size); ofs = sizeof(struct omfs_header); crc = crc_itu_t(crc, ptr + ofs, count); oi->i_head.h_crc = cpu_to_be16(crc); xor = ptr[0]; for (i = 1; i < OMFS_XOR_COUNT; i++) xor ^= ptr[i]; oi->i_head.h_check_xor = xor; } static int __omfs_write_inode(struct inode *inode, int wait) { struct omfs_inode *oi; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); struct buffer_head *bh, *bh2; u64 ctime; int i; int ret = -EIO; int sync_failed = 0; /* get current inode since we may have written sibling ptrs etc. */ bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); if (S_ISDIR(inode->i_mode)) oi->i_type = OMFS_DIR; else if (S_ISREG(inode->i_mode)) oi->i_type = OMFS_FILE; else { printk(KERN_WARNING "omfs: unknown file type: %d\n", inode->i_mode); goto out_brelse; } oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - sizeof(struct omfs_header)); oi->i_head.h_version = 1; oi->i_head.h_type = OMFS_INODE_NORMAL; oi->i_head.h_magic = OMFS_IMAGIC; oi->i_size = cpu_to_be64(inode->i_size); ctime = inode->i_ctime.tv_sec * 1000LL + ((inode->i_ctime.tv_nsec + 999)/1000); oi->i_ctime = cpu_to_be64(ctime); omfs_update_checksums(oi); mark_buffer_dirty(bh); if (wait) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) sync_failed = 1; } /* if mirroring writes, copy to next fsblock */ for (i = 1; i < sbi->s_mirrors; i++) { bh2 = omfs_bread(inode->i_sb, inode->i_ino + i); if (!bh2) goto out_brelse; memcpy(bh2->b_data, bh->b_data, bh->b_size); mark_buffer_dirty(bh2); if (wait) { sync_dirty_buffer(bh2); if (buffer_req(bh2) && !buffer_uptodate(bh2)) sync_failed = 1; } brelse(bh2); } ret = (sync_failed) ? 
-EIO : 0; out_brelse: brelse(bh); out: return ret; } static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc) { return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } int omfs_sync_inode(struct inode *inode) { return __omfs_write_inode(inode, 1); } /* * called when an entry is deleted, need to clear the bits in the * bitmaps. */ static void omfs_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (inode->i_nlink) return; if (S_ISREG(inode->i_mode)) { inode->i_size = 0; omfs_shrink_inode(inode); } omfs_clear_range(inode->i_sb, inode->i_ino, 2); } struct inode *omfs_iget(struct super_block *sb, ino_t ino) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct omfs_inode *oi; struct buffer_head *bh; u64 ctime; unsigned long nsecs; struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; bh = omfs_bread(inode->i_sb, ino); if (!bh) goto iget_failed; oi = (struct omfs_inode *)bh->b_data; /* check self */ if (ino != be64_to_cpu(oi->i_head.h_self)) goto fail_bh; inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; ctime = be64_to_cpu(oi->i_ctime); nsecs = do_div(ctime, 1000) * 1000L; inode->i_atime.tv_sec = ctime; inode->i_mtime.tv_sec = ctime; inode->i_ctime.tv_sec = ctime; inode->i_atime.tv_nsec = nsecs; inode->i_mtime.tv_nsec = nsecs; inode->i_ctime.tv_nsec = nsecs; inode->i_mapping->a_ops = &omfs_aops; switch (oi->i_type) { case OMFS_DIR: inode->i_mode = S_IFDIR | (S_IRWXUGO & ~sbi->s_dmask); inode->i_op = &omfs_dir_inops; inode->i_fop = &omfs_dir_operations; inode->i_size = sbi->s_sys_blocksize; inc_nlink(inode); break; case OMFS_FILE: inode->i_mode = S_IFREG | (S_IRWXUGO & ~sbi->s_fmask); inode->i_fop = &omfs_file_operations; inode->i_size = be64_to_cpu(oi->i_size); break; } brelse(bh); unlock_new_inode(inode); return inode; fail_bh: brelse(bh); iget_failed: iget_failed(inode); return ERR_PTR(-EIO); } static void 
omfs_put_super(struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); kfree(sbi->s_imap); kfree(sbi); sb->s_fs_info = NULL; } static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct omfs_sb_info *sbi = OMFS_SB(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); buf->f_type = OMFS_MAGIC; buf->f_bsize = sbi->s_blocksize; buf->f_blocks = sbi->s_num_blocks; buf->f_files = sbi->s_num_blocks; buf->f_namelen = OMFS_NAMELEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_bfree = buf->f_bavail = buf->f_ffree = omfs_count_free(s); return 0; } static const struct super_operations omfs_sops = { .write_inode = omfs_write_inode, .evict_inode = omfs_evict_inode, .put_super = omfs_put_super, .statfs = omfs_statfs, .show_options = generic_show_options, }; /* * For Rio Karma, there is an on-disk free bitmap whose location is * stored in the root block. For ReplayTV, there is no such free bitmap * so we have to walk the tree. Both inodes and file data are allocated * from the same map. This array can be big (300k) so we allocate * in units of the blocksize. 
*/ static int omfs_get_imap(struct super_block *sb) { int bitmap_size; int array_size; int count; struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; unsigned long **ptr; sector_t block; bitmap_size = DIV_ROUND_UP(sbi->s_num_blocks, 8); array_size = DIV_ROUND_UP(bitmap_size, sb->s_blocksize); if (sbi->s_bitmap_ino == ~0ULL) goto out; sbi->s_imap_size = array_size; sbi->s_imap = kzalloc(array_size * sizeof(unsigned long *), GFP_KERNEL); if (!sbi->s_imap) goto nomem; block = clus_to_blk(sbi, sbi->s_bitmap_ino); if (block >= sbi->s_num_blocks) goto nomem; ptr = sbi->s_imap; for (count = bitmap_size; count > 0; count -= sb->s_blocksize) { bh = sb_bread(sb, block++); if (!bh) goto nomem_free; *ptr = kmalloc(sb->s_blocksize, GFP_KERNEL); if (!*ptr) { brelse(bh); goto nomem_free; } memcpy(*ptr, bh->b_data, sb->s_blocksize); if (count < sb->s_blocksize) memset((void *)*ptr + count, 0xff, sb->s_blocksize - count); brelse(bh); ptr++; } out: return 0; nomem_free: for (count = 0; count < array_size; count++) kfree(sbi->s_imap[count]); kfree(sbi->s_imap); nomem: sbi->s_imap = NULL; sbi->s_imap_size = 0; return -ENOMEM; } enum { Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, "umask=%o"}, {Opt_dmask, "dmask=%o"}, {Opt_fmask, "fmask=%o"}, }; static int parse_options(char *options, struct omfs_sb_info *sbi) { char *p; substring_t args[MAX_OPT_ARGS]; int option; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return 0; sbi->s_uid = option; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; sbi->s_gid = option; break; case Opt_umask: if (match_octal(&args[0], &option)) return 0; sbi->s_fmask = sbi->s_dmask = option; break; case Opt_dmask: if (match_octal(&args[0], &option)) return 0; sbi->s_dmask = option; 
break; case Opt_fmask: if (match_octal(&args[0], &option)) return 0; sbi->s_fmask = option; break; default: return 0; } } return 1; } static int omfs_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh, *bh2; struct omfs_super_block *omfs_sb; struct omfs_root_block *omfs_rb; struct omfs_sb_info *sbi; struct inode *root; int ret = -EINVAL; save_mount_options(sb, (char *) data); sbi = kzalloc(sizeof(struct omfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sbi->s_uid = current_uid(); sbi->s_gid = current_gid(); sbi->s_dmask = sbi->s_fmask = current_umask(); if (!parse_options((char *) data, sbi)) goto end; sb->s_maxbytes = 0xffffffff; sb_set_blocksize(sb, 0x200); bh = sb_bread(sb, 0); if (!bh) goto end; omfs_sb = (struct omfs_super_block *)bh->b_data; if (omfs_sb->s_magic != cpu_to_be32(OMFS_MAGIC)) { if (!silent) printk(KERN_ERR "omfs: Invalid superblock (%x)\n", omfs_sb->s_magic); goto out_brelse_bh; } sb->s_magic = OMFS_MAGIC; sbi->s_num_blocks = be64_to_cpu(omfs_sb->s_num_blocks); sbi->s_blocksize = be32_to_cpu(omfs_sb->s_blocksize); sbi->s_mirrors = be32_to_cpu(omfs_sb->s_mirrors); sbi->s_root_ino = be64_to_cpu(omfs_sb->s_root_block); sbi->s_sys_blocksize = be32_to_cpu(omfs_sb->s_sys_blocksize); mutex_init(&sbi->s_bitmap_lock); if (sbi->s_sys_blocksize > PAGE_SIZE) { printk(KERN_ERR "omfs: sysblock size (%d) is out of range\n", sbi->s_sys_blocksize); goto out_brelse_bh; } if (sbi->s_blocksize < sbi->s_sys_blocksize || sbi->s_blocksize > OMFS_MAX_BLOCK_SIZE) { printk(KERN_ERR "omfs: block size (%d) is out of range\n", sbi->s_blocksize); goto out_brelse_bh; } /* * Use sys_blocksize as the fs block since it is smaller than a * page while the fs blocksize can be larger. */ sb_set_blocksize(sb, sbi->s_sys_blocksize); /* * ...and the difference goes into a shift. sys_blocksize is always * a power of two factor of blocksize. 
*/ sbi->s_block_shift = get_bitmask_order(sbi->s_blocksize) - get_bitmask_order(sbi->s_sys_blocksize); bh2 = omfs_bread(sb, be64_to_cpu(omfs_sb->s_root_block)); if (!bh2) goto out_brelse_bh; omfs_rb = (struct omfs_root_block *)bh2->b_data; sbi->s_bitmap_ino = be64_to_cpu(omfs_rb->r_bitmap); sbi->s_clustersize = be32_to_cpu(omfs_rb->r_clustersize); if (sbi->s_num_blocks != be64_to_cpu(omfs_rb->r_num_blocks)) { printk(KERN_ERR "omfs: block count discrepancy between " "super and root blocks (%llx, %llx)\n", (unsigned long long)sbi->s_num_blocks, (unsigned long long)be64_to_cpu(omfs_rb->r_num_blocks)); goto out_brelse_bh2; } if (sbi->s_bitmap_ino != ~0ULL && sbi->s_bitmap_ino > sbi->s_num_blocks) { printk(KERN_ERR "omfs: free space bitmap location is corrupt " "(%llx, total blocks %llx)\n", (unsigned long long) sbi->s_bitmap_ino, (unsigned long long) sbi->s_num_blocks); goto out_brelse_bh2; } if (sbi->s_clustersize < 1 || sbi->s_clustersize > OMFS_MAX_CLUSTER_SIZE) { printk(KERN_ERR "omfs: cluster size out of range (%d)", sbi->s_clustersize); goto out_brelse_bh2; } ret = omfs_get_imap(sb); if (ret) goto out_brelse_bh2; sb->s_op = &omfs_sops; root = omfs_iget(sb, be64_to_cpu(omfs_rb->r_root_dir)); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_brelse_bh2; } sb->s_root = d_alloc_root(root); if (!sb->s_root) { iput(root); goto out_brelse_bh2; } printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); ret = 0; out_brelse_bh2: brelse(bh2); out_brelse_bh: brelse(bh); end: if (ret) kfree(sbi); return ret; } static struct dentry *omfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, omfs_fill_super); } static struct file_system_type omfs_fs_type = { .owner = THIS_MODULE, .name = "omfs", .mount = omfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_omfs_fs(void) { return register_filesystem(&omfs_fs_type); } static void __exit 
exit_omfs_fs(void) { unregister_filesystem(&omfs_fs_type); } module_init(init_omfs_fs); module_exit(exit_omfs_fs);
gpl-2.0
jdkernel/jdkernel_vigor_2.6.35
drivers/isdn/mISDN/clock.c
3988
6411
/* * Copyright 2008 by Andreas Eversberg <andreas@eversberg.eu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Quick API description: * * A clock source registers using mISDN_register_clock: * name = text string to name clock source * priority = value to priorize clock sources (0 = default) * ctl = callback function to enable/disable clock source * priv = private pointer of clock source * return = pointer to clock source structure; * * Note: Callback 'ctl' can be called before mISDN_register_clock returns! * Also it can be called during mISDN_unregister_clock. * * A clock source calls mISDN_clock_update with given samples elapsed, if * enabled. If function call is delayed, tv must be set with the timestamp * of the actual event. * * A clock source unregisters using mISDN_unregister_clock. * * To get current clock, call mISDN_clock_get. The signed short value * counts the number of samples since. Time since last clock event is added. 
* */ #include <linux/slab.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/mISDNif.h> #include "core.h" static u_int *debug; static LIST_HEAD(iclock_list); static DEFINE_RWLOCK(iclock_lock); static u16 iclock_count; /* counter of last clock */ static struct timeval iclock_tv; /* time stamp of last clock */ static int iclock_tv_valid; /* already received one timestamp */ static struct mISDNclock *iclock_current; void mISDN_init_clock(u_int *dp) { debug = dp; do_gettimeofday(&iclock_tv); } static void select_iclock(void) { struct mISDNclock *iclock, *bestclock = NULL, *lastclock = NULL; int pri = -128; list_for_each_entry(iclock, &iclock_list, list) { if (iclock->pri > pri) { pri = iclock->pri; bestclock = iclock; } if (iclock_current == iclock) lastclock = iclock; } if (lastclock && bestclock != lastclock) { /* last used clock source still exists but changes, disable */ if (*debug & DEBUG_CLOCK) printk(KERN_DEBUG "Old clock source '%s' disable.\n", lastclock->name); lastclock->ctl(lastclock->priv, 0); } if (bestclock && bestclock != iclock_current) { /* new clock source selected, enable */ if (*debug & DEBUG_CLOCK) printk(KERN_DEBUG "New clock source '%s' enable.\n", bestclock->name); bestclock->ctl(bestclock->priv, 1); } if (bestclock != iclock_current) { /* no clock received yet */ iclock_tv_valid = 0; } iclock_current = bestclock; } struct mISDNclock *mISDN_register_clock(char *name, int pri, clockctl_func_t *ctl, void *priv) { u_long flags; struct mISDNclock *iclock; if (*debug & (DEBUG_CORE | DEBUG_CLOCK)) printk(KERN_DEBUG "%s: %s %d\n", __func__, name, pri); iclock = kzalloc(sizeof(struct mISDNclock), GFP_ATOMIC); if (!iclock) { printk(KERN_ERR "%s: No memory for clock entry.\n", __func__); return NULL; } strncpy(iclock->name, name, sizeof(iclock->name)-1); iclock->pri = pri; iclock->priv = priv; iclock->ctl = ctl; write_lock_irqsave(&iclock_lock, flags); list_add_tail(&iclock->list, &iclock_list); 
select_iclock(); write_unlock_irqrestore(&iclock_lock, flags); return iclock; } EXPORT_SYMBOL(mISDN_register_clock); void mISDN_unregister_clock(struct mISDNclock *iclock) { u_long flags; if (*debug & (DEBUG_CORE | DEBUG_CLOCK)) printk(KERN_DEBUG "%s: %s %d\n", __func__, iclock->name, iclock->pri); write_lock_irqsave(&iclock_lock, flags); if (iclock_current == iclock) { if (*debug & DEBUG_CLOCK) printk(KERN_DEBUG "Current clock source '%s' unregisters.\n", iclock->name); iclock->ctl(iclock->priv, 0); } list_del(&iclock->list); select_iclock(); write_unlock_irqrestore(&iclock_lock, flags); } EXPORT_SYMBOL(mISDN_unregister_clock); void mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv) { u_long flags; struct timeval tv_now; time_t elapsed_sec; int elapsed_8000th; write_lock_irqsave(&iclock_lock, flags); if (iclock_current != iclock) { printk(KERN_ERR "%s: '%s' sends us clock updates, but we do " "listen to '%s'. This is a bug!\n", __func__, iclock->name, iclock_current ? 
iclock_current->name : "nothing"); iclock->ctl(iclock->priv, 0); write_unlock_irqrestore(&iclock_lock, flags); return; } if (iclock_tv_valid) { /* increment sample counter by given samples */ iclock_count += samples; if (tv) { /* tv must be set, if function call is delayed */ iclock_tv.tv_sec = tv->tv_sec; iclock_tv.tv_usec = tv->tv_usec; } else do_gettimeofday(&iclock_tv); } else { /* calc elapsed time by system clock */ if (tv) { /* tv must be set, if function call is delayed */ tv_now.tv_sec = tv->tv_sec; tv_now.tv_usec = tv->tv_usec; } else do_gettimeofday(&tv_now); elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec; elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125); if (elapsed_8000th < 0) { elapsed_sec -= 1; elapsed_8000th += 8000; } /* add elapsed time to counter and set new timestamp */ iclock_count += elapsed_sec * 8000 + elapsed_8000th; iclock_tv.tv_sec = tv_now.tv_sec; iclock_tv.tv_usec = tv_now.tv_usec; iclock_tv_valid = 1; if (*debug & DEBUG_CLOCK) printk("Received first clock from source '%s'.\n", iclock_current ? iclock_current->name : "nothing"); } write_unlock_irqrestore(&iclock_lock, flags); } EXPORT_SYMBOL(mISDN_clock_update); unsigned short mISDN_clock_get(void) { u_long flags; struct timeval tv_now; time_t elapsed_sec; int elapsed_8000th; u16 count; read_lock_irqsave(&iclock_lock, flags); /* calc elapsed time by system clock */ do_gettimeofday(&tv_now); elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec; elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125); if (elapsed_8000th < 0) { elapsed_sec -= 1; elapsed_8000th += 8000; } /* add elapsed time to counter */ count = iclock_count + elapsed_sec * 8000 + elapsed_8000th; read_unlock_irqrestore(&iclock_lock, flags); return count; } EXPORT_SYMBOL(mISDN_clock_get);
gpl-2.0
fkfk/linux_gt-i9000-gb
drivers/dca/dca-sysfs.c
3988
2723
/* * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution in the * file called COPYING. */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/idr.h> #include <linux/kdev_t.h> #include <linux/err.h> #include <linux/dca.h> #include <linux/gfp.h> static struct class *dca_class; static struct idr dca_idr; static spinlock_t dca_idr_lock; int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) { struct device *cd; static int req_count; cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), NULL, "requester%d", req_count++); if (IS_ERR(cd)) return PTR_ERR(cd); return 0; } void dca_sysfs_remove_req(struct dca_provider *dca, int slot) { device_destroy(dca_class, MKDEV(0, slot + 1)); } int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev) { struct device *cd; int err = 0; idr_try_again: if (!idr_pre_get(&dca_idr, GFP_KERNEL)) return -ENOMEM; spin_lock(&dca_idr_lock); err = idr_get_new(&dca_idr, dca, &dca->id); spin_unlock(&dca_idr_lock); switch (err) { case 0: break; case -EAGAIN: goto idr_try_again; default: return err; } cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id); if (IS_ERR(cd)) { 
spin_lock(&dca_idr_lock); idr_remove(&dca_idr, dca->id); spin_unlock(&dca_idr_lock); return PTR_ERR(cd); } dca->cd = cd; return 0; } void dca_sysfs_remove_provider(struct dca_provider *dca) { device_unregister(dca->cd); dca->cd = NULL; spin_lock(&dca_idr_lock); idr_remove(&dca_idr, dca->id); spin_unlock(&dca_idr_lock); } int __init dca_sysfs_init(void) { idr_init(&dca_idr); spin_lock_init(&dca_idr_lock); dca_class = class_create(THIS_MODULE, "dca"); if (IS_ERR(dca_class)) { idr_destroy(&dca_idr); return PTR_ERR(dca_class); } return 0; } void __exit dca_sysfs_exit(void) { class_destroy(dca_class); idr_destroy(&dca_idr); }
gpl-2.0
alexax66/CM11_kernel_serranodsxx
drivers/platform/x86/acer-wmi.c
4244
50094
/* * Acer WMI Laptop Extras * * Copyright (C) 2007-2009 Carlos Corbacho <carlos@strangeworlds.co.uk> * * Based on acer_acpi: * Copyright (C) 2005-2007 E.M. Smith * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/dmi.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/i8042.h> #include <linux/rfkill.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <acpi/acpi_drivers.h> #include <acpi/video.h> MODULE_AUTHOR("Carlos Corbacho"); MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); MODULE_LICENSE("GPL"); /* * Magic Number * Meaning is unknown - this number is required for writing to ACPI for AMW0 * (it's also used in acerhk when directly accessing the BIOS) */ #define ACER_AMW0_WRITE 0x9610 /* * Bit masks for the AMW0 interface */ #define ACER_AMW0_WIRELESS_MASK 0x35 #define ACER_AMW0_BLUETOOTH_MASK 0x34 #define ACER_AMW0_MAILLED_MASK 0x31 /* * Method IDs for WMID interface */ #define 
ACER_WMID_GET_WIRELESS_METHODID 1 #define ACER_WMID_GET_BLUETOOTH_METHODID 2 #define ACER_WMID_GET_BRIGHTNESS_METHODID 3 #define ACER_WMID_SET_WIRELESS_METHODID 4 #define ACER_WMID_SET_BLUETOOTH_METHODID 5 #define ACER_WMID_SET_BRIGHTNESS_METHODID 6 #define ACER_WMID_GET_THREEG_METHODID 10 #define ACER_WMID_SET_THREEG_METHODID 11 /* * Acer ACPI method GUIDs */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A" #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" /* * Acer ACPI event GUIDs */ #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); enum acer_wmi_event_ids { WMID_HOTKEY_EVENT = 0x1, }; static const struct key_entry acer_wmi_keymap[] = { {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ {KE_IGNORE, 0x41, {KEY_MUTE} }, {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x4d, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x43, {KEY_NEXTSONG} }, {KE_IGNORE, 0x4e, {KEY_NEXTSONG} }, {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} }, {KE_IGNORE, 0x4f, {KEY_PLAYPAUSE} }, {KE_IGNORE, 0x45, {KEY_STOP} }, {KE_IGNORE, 0x50, {KEY_STOP} }, {KE_IGNORE, 0x48, {KEY_VOLUMEUP} }, {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} }, {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} }, {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} }, {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} }, {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} }, {KE_KEY, 
0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ {KE_IGNORE, 0x81, {KEY_SLEEP} }, {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, {KE_END, 0} }; static struct input_dev *acer_wmi_input_dev; struct event_return_value { u8 function; u8 key_num; u16 device_state; u32 reserved; } __attribute__((packed)); /* * GUID3 Get Device Status device flags */ #define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */ #define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */ #define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */ #define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */ struct lm_input_params { u8 function_num; /* Function Number */ u16 commun_devices; /* Communication type devices default status */ u16 devices; /* Other type devices default status */ u8 lm_status; /* Launch Manager Status */ u16 reserved; } __attribute__((packed)); struct lm_return_value { u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 reserved; } __attribute__((packed)); struct wmid3_gds_set_input_param { /* Set Device Status input parameter */ u8 function_num; /* Function Number */ u8 hotkey_number; /* Hotkey Number */ u16 devices; /* Set Device */ u8 volume_value; /* Volume Value */ } __attribute__((packed)); struct wmid3_gds_get_input_param { /* Get Device Status input parameter */ u8 function_num; /* Function Number */ u8 hotkey_number; /* Hotkey Number */ u16 devices; /* Get Device */ } __attribute__((packed)); struct wmid3_gds_return_value { /* Get Device Status return value*/ u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 devices; /* Current Device Status */ u32 reserved; } __attribute__((packed)); struct hotkey_function_type_aa { u8 type; u8 length; u16 handle; u16 commun_func_bitmap; u16 application_func_bitmap; u16 media_func_bitmap; u16 display_func_bitmap; u16 others_func_bitmap; u8 commun_fn_key_number; } __attribute__((packed)); /* * Interface capability flags */ #define ACER_CAP_MAILLED (1<<0) 
#define ACER_CAP_WIRELESS (1<<1) #define ACER_CAP_BLUETOOTH (1<<2) #define ACER_CAP_BRIGHTNESS (1<<3) #define ACER_CAP_THREEG (1<<4) #define ACER_CAP_ANY (0xFFFFFFFF) /* * Interface type flags */ enum interface_flags { ACER_AMW0, ACER_AMW0_V2, ACER_WMID, ACER_WMID_v2, }; #define ACER_DEFAULT_WIRELESS 0 #define ACER_DEFAULT_BLUETOOTH 0 #define ACER_DEFAULT_MAILLED 0 #define ACER_DEFAULT_THREEG 0 static int max_brightness = 0xF; static int mailled = -1; static int brightness = -1; static int threeg = -1; static int force_series; static bool ec_raw_mode; static bool has_type_aa; static u16 commun_func_bitmap; static u8 commun_fn_key_number; module_param(mailled, int, 0444); module_param(brightness, int, 0444); module_param(threeg, int, 0444); module_param(force_series, int, 0444); module_param(ec_raw_mode, bool, 0444); MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); MODULE_PARM_DESC(force_series, "Force a different laptop series"); MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); struct acer_data { int mailled; int threeg; int brightness; }; struct acer_debug { struct dentry *root; struct dentry *devices; u32 wmid_devices; }; static struct rfkill *wireless_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *threeg_rfkill; static bool rfkill_inited; /* Each low-level interface must define at least some of the following */ struct wmi_interface { /* The WMI device type */ u32 type; /* The capabilities this interface provides */ u32 capability; /* Private data for the current interface */ struct acer_data data; /* debugfs entries associated with this interface */ struct acer_debug debug; }; /* The static interface pointer, points to the currently detected interface */ static struct wmi_interface *interface; /* * Embedded Controller quirks * Some laptops require us to directly access the EC to either enable or 
query * features that are not available through WMI. */ struct quirk_entry { u8 wireless; u8 mailled; s8 brightness; u8 bluetooth; }; static struct quirk_entry *quirks; static void set_quirks(void) { if (!interface) return; if (quirks->mailled) interface->capability |= ACER_CAP_MAILLED; if (quirks->brightness) interface->capability |= ACER_CAP_BRIGHTNESS; } static int dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; } static struct quirk_entry quirk_unknown = { }; static struct quirk_entry quirk_acer_aspire_1520 = { .brightness = -1, }; static struct quirk_entry quirk_acer_travelmate_2490 = { .mailled = 1, }; /* This AMW0 laptop has no bluetooth */ static struct quirk_entry quirk_medion_md_98300 = { .wireless = 1, }; static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { .wireless = 2, }; static struct quirk_entry quirk_lenovo_ideapad_s205 = { .wireless = 3, }; /* The Aspire One has a dummy ACPI-WMI interface - disable it */ static struct dmi_system_id __devinitdata acer_blacklist[] = { { .ident = "Acer Aspire One (SSD)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), }, }, { .ident = "Acer Aspire One (HDD)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, }, {} }; static struct dmi_system_id acer_quirks[] = { { .callback = dmi_matched, .ident = "Acer Aspire 1360", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), }, .driver_data = &quirk_acer_aspire_1520, }, { .callback = dmi_matched, .ident = "Acer Aspire 1520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"), }, .driver_data = &quirk_acer_aspire_1520, }, { .callback = dmi_matched, .ident = "Acer Aspire 3100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 3610", 
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3610"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5610", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5630", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5650", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5680", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 9110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer TravelMate 2490", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer TravelMate 4200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4200"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Fujitsu Siemens Amilo Li 1718", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"), }, .driver_data = &quirk_fujitsu_amilo_li_1718, }, { .callback = dmi_matched, 
.ident = "Medion MD 98300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"), }, .driver_data = &quirk_medion_md_98300, }, { .callback = dmi_matched, .ident = "Lenovo Ideapad S205", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"), }, .driver_data = &quirk_lenovo_ideapad_s205, }, { .callback = dmi_matched, .ident = "Lenovo Ideapad S205 (Brazos)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "Brazos"), }, .driver_data = &quirk_lenovo_ideapad_s205, }, { .callback = dmi_matched, .ident = "Lenovo 3000 N200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"), }, .driver_data = &quirk_fujitsu_amilo_li_1718, }, {} }; static int video_set_backlight_video_vendor(const struct dmi_system_id *d) { interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by generic video driver\n"); return 0; } static const struct dmi_system_id video_vendor_dmi_table[] = { { .callback = video_set_backlight_video_vendor, .ident = "Acer TravelMate 4750", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), }, }, {} }; /* Find which quirks are needed for a particular vendor/ model pair */ static void find_quirks(void) { if (!force_series) { dmi_check_system(acer_quirks); } else if (force_series == 2490) { quirks = &quirk_acer_travelmate_2490; } if (quirks == NULL) quirks = &quirk_unknown; set_quirks(); } /* * General interface convenience methods */ static bool has_cap(u32 cap) { if ((interface->capability & cap) != 0) return 1; return 0; } /* * AMW0 (V1) interface */ struct wmab_args { u32 eax; u32 ebx; u32 ecx; u32 edx; }; struct wmab_ret { u32 eax; u32 ebx; u32 ecx; u32 edx; u32 eex; }; static acpi_status wmab_execute(struct wmab_args *regbuf, struct acpi_buffer *result) { struct acpi_buffer input; acpi_status status; input.length = sizeof(struct wmab_args); input.pointer 
= (u8 *)regbuf; status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result); return status; } static acpi_status AMW0_get_u32(u32 *value, u32 cap) { int err; u8 result; switch (cap) { case ACER_CAP_MAILLED: switch (quirks->mailled) { default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 7) & 0x1; return AE_OK; } break; case ACER_CAP_WIRELESS: switch (quirks->wireless) { case 1: err = ec_read(0x7B, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; case 2: err = ec_read(0x71, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; case 3: err = ec_read(0x78, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 2) & 0x1; return AE_OK; } break; case ACER_CAP_BLUETOOTH: switch (quirks->bluetooth) { default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 4) & 0x1; return AE_OK; } break; case ACER_CAP_BRIGHTNESS: switch (quirks->brightness) { default: err = ec_read(0x83, &result); if (err) return AE_ERROR; *value = result; return AE_OK; } break; default: return AE_ERROR; } return AE_OK; } static acpi_status AMW0_set_u32(u32 value, u32 cap) { struct wmab_args args; args.eax = ACER_AMW0_WRITE; args.ebx = value ? 
(1<<8) : 0; args.ecx = args.edx = 0; switch (cap) { case ACER_CAP_MAILLED: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_MAILLED_MASK; break; case ACER_CAP_WIRELESS: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_WIRELESS_MASK; break; case ACER_CAP_BLUETOOTH: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_BLUETOOTH_MASK; break; case ACER_CAP_BRIGHTNESS: if (value > max_brightness) return AE_BAD_PARAMETER; switch (quirks->brightness) { default: return ec_write(0x83, value); break; } default: return AE_ERROR; } /* Actually do the set */ return wmab_execute(&args, NULL); } static acpi_status AMW0_find_mailled(void) { struct wmab_args args; struct wmab_ret ret; acpi_status status = AE_OK; struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; args.eax = 0x86; args.ebx = args.ecx = args.edx = 0; status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { kfree(out.pointer); return AE_ERROR; } if (ret.eex & 0x1) interface->capability |= ACER_CAP_MAILLED; kfree(out.pointer); return AE_OK; } static int AMW0_set_cap_acpi_check_device_found; static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle, u32 level, void *context, void **retval) { AMW0_set_cap_acpi_check_device_found = 1; return AE_OK; } static const struct acpi_device_id norfkill_ids[] = { { "VPC2004", 0}, { "IBM0068", 0}, { "LEN0068", 0}, { "SNY5001", 0}, /* sony-laptop in charge */ { "", 0}, }; static int AMW0_set_cap_acpi_check_device(void) { const struct acpi_device_id *id; for (id = norfkill_ids; id->id[0]; id++) acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb, NULL, NULL); return AMW0_set_cap_acpi_check_device_found; } static acpi_status AMW0_set_capabilities(void) { struct wmab_args args; struct wmab_ret 
ret; acpi_status status; struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; /* * On laptops with this strange GUID (non Acer), normal probing doesn't * work. */ if (wmi_has_guid(AMW0_GUID2)) { if ((quirks != &quirk_unknown) || !AMW0_set_cap_acpi_check_device()) interface->capability |= ACER_CAP_WIRELESS; return AE_OK; } args.eax = ACER_AMW0_WRITE; args.ecx = args.edx = 0; args.ebx = 0xa2 << 8; args.ebx |= ACER_AMW0_WIRELESS_MASK; status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) return status; obj = out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { status = AE_ERROR; goto out; } if (ret.eax & 0x1) interface->capability |= ACER_CAP_WIRELESS; args.ebx = 2 << 8; args.ebx |= ACER_AMW0_BLUETOOTH_MASK; /* * It's ok to use existing buffer for next wmab_execute call. * But we need to kfree(out.pointer) if next wmab_execute fail. */ status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) goto out; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { status = AE_ERROR; goto out; } if (ret.eax & 0x1) interface->capability |= ACER_CAP_BLUETOOTH; /* * This appears to be safe to enable, since all Wistron based laptops * appear to use the same EC register for brightness, even if they * differ for wireless, etc */ if (quirks->brightness >= 0) interface->capability |= ACER_CAP_BRIGHTNESS; status = AE_OK; out: kfree(out.pointer); return status; } static struct wmi_interface AMW0_interface = { .type = ACER_AMW0, }; static struct wmi_interface AMW0_V2_interface = { .type = ACER_AMW0_V2, }; /* * New interface (The WMID interface) */ static acpi_status WMI_execute_u32(u32 method_id, u32 in, u32 *out) { struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) }; struct 
acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; u32 tmp; acpi_status status; status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) result.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { tmp = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { tmp = (u32) obj->integer.value; } else { tmp = 0; } if (out) *out = tmp; kfree(result.pointer); return status; } static acpi_status WMID_get_u32(u32 *value, u32 cap) { acpi_status status; u8 tmp; u32 result, method_id = 0; switch (cap) { case ACER_CAP_WIRELESS: method_id = ACER_WMID_GET_WIRELESS_METHODID; break; case ACER_CAP_BLUETOOTH: method_id = ACER_WMID_GET_BLUETOOTH_METHODID; break; case ACER_CAP_BRIGHTNESS: method_id = ACER_WMID_GET_BRIGHTNESS_METHODID; break; case ACER_CAP_THREEG: method_id = ACER_WMID_GET_THREEG_METHODID; break; case ACER_CAP_MAILLED: if (quirks->mailled == 1) { ec_read(0x9f, &tmp); *value = tmp & 0x1; return 0; } default: return AE_ERROR; } status = WMI_execute_u32(method_id, 0, &result); if (ACPI_SUCCESS(status)) *value = (u8)result; return status; } static acpi_status WMID_set_u32(u32 value, u32 cap) { u32 method_id = 0; char param; switch (cap) { case ACER_CAP_BRIGHTNESS: if (value > max_brightness) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_BRIGHTNESS_METHODID; break; case ACER_CAP_WIRELESS: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_WIRELESS_METHODID; break; case ACER_CAP_BLUETOOTH: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_BLUETOOTH_METHODID; break; case ACER_CAP_THREEG: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_THREEG_METHODID; break; case ACER_CAP_MAILLED: if (value > 1) return AE_BAD_PARAMETER; if (quirks->mailled == 1) { param = value ? 
0x92 : 0x93; i8042_lock_chip(); i8042_command(&param, 0x1059); i8042_unlock_chip(); return 0; } break; default: return AE_ERROR; } return WMI_execute_u32(method_id, (u32)value, NULL); } static acpi_status wmid3_get_device_status(u32 *value, u16 device) { struct wmid3_gds_return_value return_value; acpi_status status; union acpi_object *obj; struct wmid3_gds_get_input_param params = { .function_num = 0x1, .hotkey_number = commun_fn_key_number, .devices = device, }; struct acpi_buffer input = { sizeof(struct wmid3_gds_get_input_param), &params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) pr_warn("Get 0x%x Device Status failed: 0x%x - 0x%x\n", device, return_value.error_code, return_value.ec_return_value); else *value = !!(return_value.devices & device); return status; } static acpi_status wmid_v2_get_u32(u32 *value, u32 cap) { u16 device; switch (cap) { case ACER_CAP_WIRELESS: device = ACER_WMID3_GDS_WIRELESS; break; case ACER_CAP_BLUETOOTH: device = ACER_WMID3_GDS_BLUETOOTH; break; case ACER_CAP_THREEG: device = ACER_WMID3_GDS_THREEG; break; default: return AE_ERROR; } return wmid3_get_device_status(value, device); } static acpi_status wmid3_set_device_status(u32 value, u16 device) { struct wmid3_gds_return_value return_value; acpi_status status; union acpi_object *obj; u16 devices; struct wmid3_gds_get_input_param get_params = { .function_num = 0x1, .hotkey_number = commun_fn_key_number, .devices = commun_func_bitmap, }; struct acpi_buffer get_input = { 
sizeof(struct wmid3_gds_get_input_param), &get_params }; struct wmid3_gds_set_input_param set_params = { .function_num = 0x2, .hotkey_number = commun_fn_key_number, .devices = commun_func_bitmap, }; struct acpi_buffer set_input = { sizeof(struct wmid3_gds_set_input_param), &set_params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &get_input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) { pr_warn("Get Current Device Status failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } devices = return_value.devices; set_params.devices = (value) ? 
(devices | device) : (devices & ~device); status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &set_input, &output2); if (ACPI_FAILURE(status)) return status; obj = output2.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 4) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) pr_warn("Set Device Status failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } static acpi_status wmid_v2_set_u32(u32 value, u32 cap) { u16 device; switch (cap) { case ACER_CAP_WIRELESS: device = ACER_WMID3_GDS_WIRELESS; break; case ACER_CAP_BLUETOOTH: device = ACER_WMID3_GDS_BLUETOOTH; break; case ACER_CAP_THREEG: device = ACER_WMID3_GDS_THREEG; break; default: return AE_ERROR; } return wmid3_set_device_status(value, device); } static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) { struct hotkey_function_type_aa *type_aa; /* We are looking for OEM-specific Type AAh */ if (header->type != 0xAA) return; has_type_aa = true; type_aa = (struct hotkey_function_type_aa *) header; pr_info("Function bitmap for Communication Button: 0x%x\n", type_aa->commun_func_bitmap); commun_func_bitmap = type_aa->commun_func_bitmap; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) interface->capability |= ACER_CAP_WIRELESS; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG) interface->capability |= ACER_CAP_THREEG; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH) interface->capability |= ACER_CAP_BLUETOOTH; commun_fn_key_number = type_aa->commun_fn_key_number; } static acpi_status WMID_set_capabilities(void) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; acpi_status status; u32 devices; status = wmi_query_block(WMID_GUID2, 1, 
&out); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; } else { kfree(out.pointer); return AE_ERROR; } pr_info("Function bitmap for Communication Device: 0x%x\n", devices); if (devices & 0x07) interface->capability |= ACER_CAP_WIRELESS; if (devices & 0x40) interface->capability |= ACER_CAP_THREEG; if (devices & 0x10) interface->capability |= ACER_CAP_BLUETOOTH; if (!(devices & 0x20)) max_brightness = 0x9; kfree(out.pointer); return status; } static struct wmi_interface wmid_interface = { .type = ACER_WMID, }; static struct wmi_interface wmid_v2_interface = { .type = ACER_WMID_v2, }; /* * Generic Device (interface-independent) */ static acpi_status get_u32(u32 *value, u32 cap) { acpi_status status = AE_ERROR; switch (interface->type) { case ACER_AMW0: status = AMW0_get_u32(value, cap); break; case ACER_AMW0_V2: if (cap == ACER_CAP_MAILLED) { status = AMW0_get_u32(value, cap); break; } case ACER_WMID: status = WMID_get_u32(value, cap); break; case ACER_WMID_v2: if (cap & (ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) status = wmid_v2_get_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) status = WMID_get_u32(value, cap); break; } return status; } static acpi_status set_u32(u32 value, u32 cap) { acpi_status status; if (interface->capability & cap) { switch (interface->type) { case ACER_AMW0: return AMW0_set_u32(value, cap); case ACER_AMW0_V2: if (cap == ACER_CAP_MAILLED) return AMW0_set_u32(value, cap); /* * On some models, some WMID methods don't toggle * properly. For those cases, we want to run the AMW0 * method afterwards to be certain we've really toggled * the device state. 
*/ if (cap == ACER_CAP_WIRELESS || cap == ACER_CAP_BLUETOOTH) { status = WMID_set_u32(value, cap); if (ACPI_FAILURE(status)) return status; return AMW0_set_u32(value, cap); } case ACER_WMID: return WMID_set_u32(value, cap); case ACER_WMID_v2: if (cap & (ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) return wmid_v2_set_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) return WMID_set_u32(value, cap); default: return AE_BAD_PARAMETER; } } return AE_BAD_PARAMETER; } static void __init acer_commandline_init(void) { /* * These will all fail silently if the value given is invalid, or the * capability isn't available on the given interface */ if (mailled >= 0) set_u32(mailled, ACER_CAP_MAILLED); if (!has_type_aa && threeg >= 0) set_u32(threeg, ACER_CAP_THREEG); if (brightness >= 0) set_u32(brightness, ACER_CAP_BRIGHTNESS); } /* * LED device (Mail LED only, no other LEDs known yet) */ static void mail_led_set(struct led_classdev *led_cdev, enum led_brightness value) { set_u32(value, ACER_CAP_MAILLED); } static struct led_classdev mail_led = { .name = "acer-wmi::mail", .brightness_set = mail_led_set, }; static int __devinit acer_led_init(struct device *dev) { return led_classdev_register(dev, &mail_led); } static void acer_led_exit(void) { set_u32(LED_OFF, ACER_CAP_MAILLED); led_classdev_unregister(&mail_led); } /* * Backlight device */ static struct backlight_device *acer_backlight_device; static int read_brightness(struct backlight_device *bd) { u32 value; get_u32(&value, ACER_CAP_BRIGHTNESS); return value; } static int update_bl_status(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; set_u32(intensity, ACER_CAP_BRIGHTNESS); return 0; } static const struct backlight_ops acer_bl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; static int __devinit acer_backlight_init(struct device *dev) { struct 
backlight_properties props; struct backlight_device *bd; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness; bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register Acer backlight device\n"); acer_backlight_device = NULL; return PTR_ERR(bd); } acer_backlight_device = bd; bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = read_brightness(bd); backlight_update_status(bd); return 0; } static void acer_backlight_exit(void) { backlight_device_unregister(acer_backlight_device); } /* * Rfkill devices */ static void acer_rfkill_update(struct work_struct *ignored); static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update); static void acer_rfkill_update(struct work_struct *ignored) { u32 state; acpi_status status; if (has_cap(ACER_CAP_WIRELESS)) { status = get_u32(&state, ACER_CAP_WIRELESS); if (ACPI_SUCCESS(status)) { if (quirks->wireless == 3) rfkill_set_hw_state(wireless_rfkill, !state); else rfkill_set_sw_state(wireless_rfkill, !state); } } if (has_cap(ACER_CAP_BLUETOOTH)) { status = get_u32(&state, ACER_CAP_BLUETOOTH); if (ACPI_SUCCESS(status)) rfkill_set_sw_state(bluetooth_rfkill, !state); } if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) { status = get_u32(&state, ACER_WMID3_GDS_THREEG); if (ACPI_SUCCESS(status)) rfkill_set_sw_state(threeg_rfkill, !state); } schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); } static int acer_rfkill_set(void *data, bool blocked) { acpi_status status; u32 cap = (unsigned long)data; if (rfkill_inited) { status = set_u32(!blocked, cap); if (ACPI_FAILURE(status)) return -ENODEV; } return 0; } static const struct rfkill_ops acer_rfkill_ops = { .set_block = acer_rfkill_set, }; static struct rfkill *acer_rfkill_register(struct device *dev, enum rfkill_type type, char *name, u32 cap) { int err; struct rfkill *rfkill_dev; u32 state; acpi_status status; 
rfkill_dev = rfkill_alloc(name, dev, type, &acer_rfkill_ops, (void *)(unsigned long)cap); if (!rfkill_dev) return ERR_PTR(-ENOMEM); status = get_u32(&state, cap); err = rfkill_register(rfkill_dev); if (err) { rfkill_destroy(rfkill_dev); return ERR_PTR(err); } if (ACPI_SUCCESS(status)) rfkill_set_sw_state(rfkill_dev, !state); return rfkill_dev; } static int acer_rfkill_init(struct device *dev) { int err; if (has_cap(ACER_CAP_WIRELESS)) { wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN, "acer-wireless", ACER_CAP_WIRELESS); if (IS_ERR(wireless_rfkill)) { err = PTR_ERR(wireless_rfkill); goto error_wireless; } } if (has_cap(ACER_CAP_BLUETOOTH)) { bluetooth_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_BLUETOOTH, "acer-bluetooth", ACER_CAP_BLUETOOTH); if (IS_ERR(bluetooth_rfkill)) { err = PTR_ERR(bluetooth_rfkill); goto error_bluetooth; } } if (has_cap(ACER_CAP_THREEG)) { threeg_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WWAN, "acer-threeg", ACER_CAP_THREEG); if (IS_ERR(threeg_rfkill)) { err = PTR_ERR(threeg_rfkill); goto error_threeg; } } rfkill_inited = true; if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); return 0; error_threeg: if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } error_bluetooth: if (has_cap(ACER_CAP_WIRELESS)) { rfkill_unregister(wireless_rfkill); rfkill_destroy(wireless_rfkill); } error_wireless: return err; } static void acer_rfkill_exit(void) { if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) cancel_delayed_work_sync(&acer_rfkill_work); if (has_cap(ACER_CAP_WIRELESS)) { rfkill_unregister(wireless_rfkill); rfkill_destroy(wireless_rfkill); } if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } if 
(has_cap(ACER_CAP_THREEG)) { rfkill_unregister(threeg_rfkill); rfkill_destroy(threeg_rfkill); } return; } /* * sysfs interface */ static ssize_t show_bool_threeg(struct device *dev, struct device_attribute *attr, char *buf) { u32 result; \ acpi_status status; pr_info("This threeg sysfs will be removed in 2012 - used by: %s\n", current->comm); status = get_u32(&result, ACER_CAP_THREEG); if (ACPI_SUCCESS(status)) return sprintf(buf, "%u\n", result); return sprintf(buf, "Read error\n"); } static ssize_t set_bool_threeg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 tmp = simple_strtoul(buf, NULL, 10); acpi_status status = set_u32(tmp, ACER_CAP_THREEG); pr_info("This threeg sysfs will be removed in 2012 - used by: %s\n", current->comm); if (ACPI_FAILURE(status)) return -EINVAL; return count; } static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, char *buf) { pr_info("This interface sysfs will be removed in 2012 - used by: %s\n", current->comm); switch (interface->type) { case ACER_AMW0: return sprintf(buf, "AMW0\n"); case ACER_AMW0_V2: return sprintf(buf, "AMW0 v2\n"); case ACER_WMID: return sprintf(buf, "WMID\n"); case ACER_WMID_v2: return sprintf(buf, "WMID v2\n"); default: return sprintf(buf, "Error!\n"); } } static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL); static void acer_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct event_return_value return_value; acpi_status status; u16 device_state; const struct key_entry *key; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_warn("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (!obj) return; if (obj->type != ACPI_TYPE_BUFFER) { pr_warn("Unknown response received %d\n", obj->type); kfree(obj); return; } if 
(obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return; } return_value = *((struct event_return_value *)obj->buffer.pointer); kfree(obj); switch (return_value.function) { case WMID_HOTKEY_EVENT: device_state = return_value.device_state; pr_debug("device state: 0x%x\n", device_state); key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev, return_value.key_num); if (!key) { pr_warn("Unknown key number - 0x%x\n", return_value.key_num); } else { switch (key->keycode) { case KEY_WLAN: case KEY_BLUETOOTH: if (has_cap(ACER_CAP_WIRELESS)) rfkill_set_sw_state(wireless_rfkill, !(device_state & ACER_WMID3_GDS_WIRELESS)); if (has_cap(ACER_CAP_THREEG)) rfkill_set_sw_state(threeg_rfkill, !(device_state & ACER_WMID3_GDS_THREEG)); if (has_cap(ACER_CAP_BLUETOOTH)) rfkill_set_sw_state(bluetooth_rfkill, !(device_state & ACER_WMID3_GDS_BLUETOOTH)); break; } sparse_keymap_report_entry(acer_wmi_input_dev, key, 1, true); } break; default: pr_warn("Unknown function number - %d - %d\n", return_value.function, return_value.key_num); break; } } static acpi_status wmid3_set_lm_mode(struct lm_input_params *params, struct lm_return_value *return_value) { acpi_status status; union acpi_object *obj; struct acpi_buffer input = { sizeof(struct lm_input_params), params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 4) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } *return_value = *((struct lm_return_value *)obj->buffer.pointer); kfree(obj); return status; } static int acer_wmi_enable_ec_raw(void) { struct lm_return_value return_value; acpi_status status; struct lm_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, 
.devices = 0xFFFF, .lm_status = 0x00, /* Launch Manager Deactive */ }; status = wmid3_set_lm_mode(&params, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); else pr_info("Enabled EC raw mode\n"); return status; } static int acer_wmi_enable_lm(void) { struct lm_return_value return_value; acpi_status status; struct lm_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, .devices = 0xFFFF, .lm_status = 0x01, /* Launch Manager Active */ }; status = wmid3_set_lm_mode(&params, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } static int __init acer_wmi_input_setup(void) { acpi_status status; int err; acer_wmi_input_dev = input_allocate_device(); if (!acer_wmi_input_dev) return -ENOMEM; acer_wmi_input_dev->name = "Acer WMI hotkeys"; acer_wmi_input_dev->phys = "wmi/input0"; acer_wmi_input_dev->id.bustype = BUS_HOST; err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL); if (err) goto err_free_dev; status = wmi_install_notify_handler(ACERWMID_EVENT_GUID, acer_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; goto err_free_keymap; } err = input_register_device(acer_wmi_input_dev); if (err) goto err_uninstall_notifier; return 0; err_uninstall_notifier: wmi_remove_notify_handler(ACERWMID_EVENT_GUID); err_free_keymap: sparse_keymap_free(acer_wmi_input_dev); err_free_dev: input_free_device(acer_wmi_input_dev); return err; } static void acer_wmi_input_destroy(void) { wmi_remove_notify_handler(ACERWMID_EVENT_GUID); sparse_keymap_free(acer_wmi_input_dev); input_unregister_device(acer_wmi_input_dev); } /* * debugfs functions */ static u32 get_wmid_devices(void) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; acpi_status 
status; u32 devices = 0; status = wmi_query_block(WMID_GUID2, 1, &out); if (ACPI_FAILURE(status)) return 0; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; } kfree(out.pointer); return devices; } /* * Platform device */ static int __devinit acer_platform_probe(struct platform_device *device) { int err; if (has_cap(ACER_CAP_MAILLED)) { err = acer_led_init(&device->dev); if (err) goto error_mailled; } if (has_cap(ACER_CAP_BRIGHTNESS)) { err = acer_backlight_init(&device->dev); if (err) goto error_brightness; } err = acer_rfkill_init(&device->dev); if (err) goto error_rfkill; return err; error_rfkill: if (has_cap(ACER_CAP_BRIGHTNESS)) acer_backlight_exit(); error_brightness: if (has_cap(ACER_CAP_MAILLED)) acer_led_exit(); error_mailled: return err; } static int acer_platform_remove(struct platform_device *device) { if (has_cap(ACER_CAP_MAILLED)) acer_led_exit(); if (has_cap(ACER_CAP_BRIGHTNESS)) acer_backlight_exit(); acer_rfkill_exit(); return 0; } static int acer_platform_suspend(struct platform_device *dev, pm_message_t state) { u32 value; struct acer_data *data = &interface->data; if (!data) return -ENOMEM; if (has_cap(ACER_CAP_MAILLED)) { get_u32(&value, ACER_CAP_MAILLED); set_u32(LED_OFF, ACER_CAP_MAILLED); data->mailled = value; } if (has_cap(ACER_CAP_BRIGHTNESS)) { get_u32(&value, ACER_CAP_BRIGHTNESS); data->brightness = value; } return 0; } static int acer_platform_resume(struct platform_device *device) { struct acer_data *data = &interface->data; if (!data) return -ENOMEM; if (has_cap(ACER_CAP_MAILLED)) set_u32(data->mailled, ACER_CAP_MAILLED); if (has_cap(ACER_CAP_BRIGHTNESS)) set_u32(data->brightness, ACER_CAP_BRIGHTNESS); return 0; } static void acer_platform_shutdown(struct platform_device *device) { struct acer_data *data 
= &interface->data; if (!data) return; if (has_cap(ACER_CAP_MAILLED)) set_u32(LED_OFF, ACER_CAP_MAILLED); } static struct platform_driver acer_platform_driver = { .driver = { .name = "acer-wmi", .owner = THIS_MODULE, }, .probe = acer_platform_probe, .remove = acer_platform_remove, .suspend = acer_platform_suspend, .resume = acer_platform_resume, .shutdown = acer_platform_shutdown, }; static struct platform_device *acer_platform_device; static int remove_sysfs(struct platform_device *device) { if (has_cap(ACER_CAP_THREEG)) device_remove_file(&device->dev, &dev_attr_threeg); device_remove_file(&device->dev, &dev_attr_interface); return 0; } static int create_sysfs(void) { int retval = -ENOMEM; if (has_cap(ACER_CAP_THREEG)) { retval = device_create_file(&acer_platform_device->dev, &dev_attr_threeg); if (retval) goto error_sysfs; } retval = device_create_file(&acer_platform_device->dev, &dev_attr_interface); if (retval) goto error_sysfs; return 0; error_sysfs: remove_sysfs(acer_platform_device); return retval; } static void remove_debugfs(void) { debugfs_remove(interface->debug.devices); debugfs_remove(interface->debug.root); } static int create_debugfs(void) { interface->debug.root = debugfs_create_dir("acer-wmi", NULL); if (!interface->debug.root) { pr_err("Failed to create debugfs directory"); return -ENOMEM; } interface->debug.devices = debugfs_create_u32("devices", S_IRUGO, interface->debug.root, &interface->debug.wmid_devices); if (!interface->debug.devices) goto error_debugfs; return 0; error_debugfs: remove_debugfs(); return -ENOMEM; } static int __init acer_wmi_init(void) { int err; pr_info("Acer Laptop ACPI-WMI Extras\n"); if (dmi_check_system(acer_blacklist)) { pr_info("Blacklisted hardware detected - not loading\n"); return -ENODEV; } find_quirks(); /* * Detect which ACPI-WMI interface we're using. 
*/ if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) interface = &AMW0_V2_interface; if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) interface = &wmid_interface; if (wmi_has_guid(WMID_GUID3)) interface = &wmid_v2_interface; if (interface) dmi_walk(type_aa_dmi_decode, NULL); if (wmi_has_guid(WMID_GUID2) && interface) { if (!has_type_aa && ACPI_FAILURE(WMID_set_capabilities())) { pr_err("Unable to detect available WMID devices\n"); return -ENODEV; } /* WMID always provides brightness methods */ interface->capability |= ACER_CAP_BRIGHTNESS; } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { pr_err("No WMID device detection method found\n"); return -ENODEV; } if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) { interface = &AMW0_interface; if (ACPI_FAILURE(AMW0_set_capabilities())) { pr_err("Unable to detect available AMW0 devices\n"); return -ENODEV; } } if (wmi_has_guid(AMW0_GUID1)) AMW0_find_mailled(); if (!interface) { pr_err("No or unsupported WMI interface, unable to load\n"); return -ENODEV; } set_quirks(); if (acpi_video_backlight_support()) { if (dmi_check_system(video_vendor_dmi_table)) { acpi_video_unregister(); } else { interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by " "acpi video driver\n"); } } if (wmi_has_guid(WMID_GUID3)) { if (ec_raw_mode) { if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { pr_err("Cannot enable EC raw mode\n"); return -ENODEV; } } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { pr_err("Cannot enable Launch Manager mode\n"); return -ENODEV; } } else if (ec_raw_mode) { pr_info("No WMID EC raw mode enable method\n"); } if (wmi_has_guid(ACERWMID_EVENT_GUID)) { err = acer_wmi_input_setup(); if (err) return err; } err = platform_driver_register(&acer_platform_driver); if (err) { pr_err("Unable to register platform driver\n"); goto error_platform_register; } acer_platform_device = platform_device_alloc("acer-wmi", -1); if (!acer_platform_device) { err = -ENOMEM; 
goto error_device_alloc; } err = platform_device_add(acer_platform_device); if (err) goto error_device_add; err = create_sysfs(); if (err) goto error_create_sys; if (wmi_has_guid(WMID_GUID2)) { interface->debug.wmid_devices = get_wmid_devices(); err = create_debugfs(); if (err) goto error_create_debugfs; } /* Override any initial settings with values from the commandline */ acer_commandline_init(); return 0; error_create_debugfs: remove_sysfs(acer_platform_device); error_create_sys: platform_device_del(acer_platform_device); error_device_add: platform_device_put(acer_platform_device); error_device_alloc: platform_driver_unregister(&acer_platform_driver); error_platform_register: if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); return err; } static void __exit acer_wmi_exit(void) { if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); remove_sysfs(acer_platform_device); remove_debugfs(); platform_device_unregister(acer_platform_device); platform_driver_unregister(&acer_platform_driver); pr_info("Acer Laptop WMI Extras unloaded\n"); return; } module_init(acer_wmi_init); module_exit(acer_wmi_exit);
gpl-2.0
1N4148/kernel_golden
fs/ext3/hash.c
5012
4451
/* * linux/fs/ext3/hash.c * * Copyright (C) 2002 by Theodore Ts'o * * This file is released under the GPL v2. * * This file may be redistributed under the terms of the GNU Public * License. */ #include <linux/fs.h> #include <linux/jbd.h> #include <linux/ext3_fs.h> #include <linux/cryptohash.h> #define DELTA 0x9E3779B9 static void TEA_transform(__u32 buf[4], __u32 const in[]) { __u32 sum = 0; __u32 b0 = buf[0], b1 = buf[1]; __u32 a = in[0], b = in[1], c = in[2], d = in[3]; int n = 16; do { sum += DELTA; b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); } while(--n); buf[0] += b0; buf[1] += b1; } /* The old legacy hash */ static __u32 dx_hack_hash_unsigned(const char *name, int len) { __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; const unsigned char *ucp = (const unsigned char *) name; while (len--) { hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); if (hash & 0x80000000) hash -= 0x7fffffff; hash1 = hash0; hash0 = hash; } return hash0 << 1; } static __u32 dx_hack_hash_signed(const char *name, int len) { __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; const signed char *scp = (const signed char *) name; while (len--) { hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); if (hash & 0x80000000) hash -= 0x7fffffff; hash1 = hash0; hash0 = hash; } return hash0 << 1; } static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; const signed char *scp = (const signed char *) msg; pad = (__u32)len | ((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = ((int) scp[i]) + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; const unsigned char *ucp = (const unsigned char *) msg; pad = (__u32)len | 
((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i=0; i < len; i++) { if ((i % 4) == 0) val = pad; val = ((int) ucp[i]) + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } /* * Returns the hash of a filename. If len is 0 and name is NULL, then * this function can be used to test whether or not a hash version is * supported. * * The seed is an 4 longword (32 bits) "secret" which can be used to * uniquify a hash. If the seed is all zero's, then some default seed * may be used. * * A particular hash version specifies whether or not the seed is * represented, and whether or not the returned hash is 32 bits or 64 * bits. 32 bit hashes will return 0 for the minor hash. */ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo) { __u32 hash; __u32 minor_hash = 0; const char *p; int i; __u32 in[8], buf[4]; void (*str2hashbuf)(const char *, int, __u32 *, int) = str2hashbuf_signed; /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; buf[1] = 0xefcdab89; buf[2] = 0x98badcfe; buf[3] = 0x10325476; /* Check to see if the seed is all zero's */ if (hinfo->seed) { for (i=0; i < 4; i++) { if (hinfo->seed[i]) break; } if (i < 4) memcpy(buf, hinfo->seed, sizeof(buf)); } switch (hinfo->hash_version) { case DX_HASH_LEGACY_UNSIGNED: hash = dx_hack_hash_unsigned(name, len); break; case DX_HASH_LEGACY: hash = dx_hack_hash_signed(name, len); break; case DX_HASH_HALF_MD4_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; case DX_HASH_HALF_MD4: p = name; while (len > 0) { (*str2hashbuf)(p, len, in, 8); half_md4_transform(buf, in); len -= 32; p += 32; } minor_hash = buf[2]; hash = buf[1]; break; case DX_HASH_TEA_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; case DX_HASH_TEA: p = name; while (len > 0) { (*str2hashbuf)(p, len, in, 4); TEA_transform(buf, in); len -= 16; p += 16; } hash = buf[0]; minor_hash = buf[1]; break; default: 
hinfo->hash = 0; return -1; } hash = hash & ~1; if (hash == (EXT3_HTREE_EOF << 1)) hash = (EXT3_HTREE_EOF-1) << 1; hinfo->hash = hash; hinfo->minor_hash = minor_hash; return 0; }
gpl-2.0
scanno/android_kernel_asus_me301t
drivers/media/video/tlg2300/pd-video.c
8084
41693
#include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/videodev2.h> #include <linux/usb.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-dev.h> #include "pd-common.h" #include "vendorcmds.h" #ifdef CONFIG_PM static int pm_video_suspend(struct poseidon *pd); static int pm_video_resume(struct poseidon *pd); #endif static void iso_bubble_handler(struct work_struct *w); static int usb_transfer_mode; module_param(usb_transfer_mode, int, 0644); MODULE_PARM_DESC(usb_transfer_mode, "0 = Bulk, 1 = Isochronous"); static const struct poseidon_format poseidon_formats[] = { { "YUV 422", V4L2_PIX_FMT_YUYV, 16, 0}, { "RGB565", V4L2_PIX_FMT_RGB565, 16, 0}, }; static const struct poseidon_tvnorm poseidon_tvnorms[] = { { V4L2_STD_PAL_D, "PAL-D", TLG_TUNE_VSTD_PAL_D }, { V4L2_STD_PAL_B, "PAL-B", TLG_TUNE_VSTD_PAL_B }, { V4L2_STD_PAL_G, "PAL-G", TLG_TUNE_VSTD_PAL_G }, { V4L2_STD_PAL_H, "PAL-H", TLG_TUNE_VSTD_PAL_H }, { V4L2_STD_PAL_I, "PAL-I", TLG_TUNE_VSTD_PAL_I }, { V4L2_STD_PAL_M, "PAL-M", TLG_TUNE_VSTD_PAL_M }, { V4L2_STD_PAL_N, "PAL-N", TLG_TUNE_VSTD_PAL_N_COMBO }, { V4L2_STD_PAL_Nc, "PAL-Nc", TLG_TUNE_VSTD_PAL_N_COMBO }, { V4L2_STD_NTSC_M, "NTSC-M", TLG_TUNE_VSTD_NTSC_M }, { V4L2_STD_NTSC_M_JP, "NTSC-JP", TLG_TUNE_VSTD_NTSC_M_J }, { V4L2_STD_SECAM_B, "SECAM-B", TLG_TUNE_VSTD_SECAM_B }, { V4L2_STD_SECAM_D, "SECAM-D", TLG_TUNE_VSTD_SECAM_D }, { V4L2_STD_SECAM_G, "SECAM-G", TLG_TUNE_VSTD_SECAM_G }, { V4L2_STD_SECAM_H, "SECAM-H", TLG_TUNE_VSTD_SECAM_H }, { V4L2_STD_SECAM_K, "SECAM-K", TLG_TUNE_VSTD_SECAM_K }, { V4L2_STD_SECAM_K1, "SECAM-K1", TLG_TUNE_VSTD_SECAM_K1 }, { V4L2_STD_SECAM_L, "SECAM-L", TLG_TUNE_VSTD_SECAM_L }, { V4L2_STD_SECAM_LC, "SECAM-LC", TLG_TUNE_VSTD_SECAM_L1 }, }; static const unsigned int POSEIDON_TVNORMS = ARRAY_SIZE(poseidon_tvnorms); struct pd_audio_mode { u32 tlg_audio_mode; u32 v4l2_audio_sub; u32 v4l2_audio_mode; }; static const struct pd_audio_mode pd_audio_modes[] = { { 
TLG_TUNE_TVAUDIO_MODE_MONO, V4L2_TUNER_SUB_MONO, V4L2_TUNER_MODE_MONO }, { TLG_TUNE_TVAUDIO_MODE_STEREO, V4L2_TUNER_SUB_STEREO, V4L2_TUNER_MODE_STEREO }, { TLG_TUNE_TVAUDIO_MODE_LANG_A, V4L2_TUNER_SUB_LANG1, V4L2_TUNER_MODE_LANG1 }, { TLG_TUNE_TVAUDIO_MODE_LANG_B, V4L2_TUNER_SUB_LANG2, V4L2_TUNER_MODE_LANG2 }, { TLG_TUNE_TVAUDIO_MODE_LANG_C, V4L2_TUNER_SUB_LANG1, V4L2_TUNER_MODE_LANG1_LANG2 } }; static const unsigned int POSEIDON_AUDIOMODS = ARRAY_SIZE(pd_audio_modes); struct pd_input { char *name; uint32_t tlg_src; }; static const struct pd_input pd_inputs[] = { { "TV Antenna", TLG_SIG_SRC_ANTENNA }, { "TV Cable", TLG_SIG_SRC_CABLE }, { "TV SVideo", TLG_SIG_SRC_SVIDEO }, { "TV Composite", TLG_SIG_SRC_COMPOSITE } }; static const unsigned int POSEIDON_INPUTS = ARRAY_SIZE(pd_inputs); struct poseidon_control { struct v4l2_queryctrl v4l2_ctrl; enum cmd_custom_param_id vc_id; }; static struct poseidon_control controls[] = { { { V4L2_CID_BRIGHTNESS, V4L2_CTRL_TYPE_INTEGER, "brightness", 0, 10000, 1, 100, 0, }, CUST_PARM_ID_BRIGHTNESS_CTRL }, { { V4L2_CID_CONTRAST, V4L2_CTRL_TYPE_INTEGER, "contrast", 0, 10000, 1, 100, 0, }, CUST_PARM_ID_CONTRAST_CTRL, }, { { V4L2_CID_HUE, V4L2_CTRL_TYPE_INTEGER, "hue", 0, 10000, 1, 100, 0, }, CUST_PARM_ID_HUE_CTRL, }, { { V4L2_CID_SATURATION, V4L2_CTRL_TYPE_INTEGER, "saturation", 0, 10000, 1, 100, 0, }, CUST_PARM_ID_SATURATION_CTRL, }, }; struct video_std_to_audio_std { v4l2_std_id video_std; int audio_std; }; static const struct video_std_to_audio_std video_to_audio_map[] = { /* country : { 27, 32, 33, 34, 36, 44, 45, 46, 47, 48, 64, 65, 86, 351, 352, 353, 354, 358, 372, 852, 972 } */ { (V4L2_STD_PAL_I | V4L2_STD_PAL_B | V4L2_STD_PAL_D | V4L2_STD_SECAM_L | V4L2_STD_SECAM_D), TLG_TUNE_ASTD_NICAM }, /* country : { 1, 52, 54, 55, 886 } */ {V4L2_STD_NTSC_M | V4L2_STD_PAL_N | V4L2_STD_PAL_M, TLG_TUNE_ASTD_BTSC}, /* country : { 81 } */ { V4L2_STD_NTSC_M_JP, TLG_TUNE_ASTD_EIAJ }, /* other country : TLG_TUNE_ASTD_A2 */ }; static const unsigned 
int map_size = ARRAY_SIZE(video_to_audio_map); static int get_audio_std(v4l2_std_id v4l2_std) { int i = 0; for (; i < map_size; i++) { if (v4l2_std & video_to_audio_map[i].video_std) return video_to_audio_map[i].audio_std; } return TLG_TUNE_ASTD_A2; } static int vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct front_face *front = fh; struct poseidon *p = front->pd; logs(front); strcpy(cap->driver, "tele-video"); strcpy(cap->card, "Telegent Poseidon"); usb_make_path(p->udev, cap->bus_info, sizeof(cap->bus_info)); cap->version = KERNEL_VERSION(0, 0, 1); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE; return 0; } /*====================================================================*/ static void init_copy(struct video_data *video, bool index) { struct front_face *front = video->front; video->field_count = index; video->lines_copied = 0; video->prev_left = 0 ; video->dst = (char *)videobuf_to_vmalloc(front->curr_frame) + index * video->lines_size; video->vbi->copied = 0; /* set it here */ } static bool get_frame(struct front_face *front, int *need_init) { struct videobuf_buffer *vb = front->curr_frame; if (vb) return true; spin_lock(&front->queue_lock); if (!list_empty(&front->active)) { vb = list_entry(front->active.next, struct videobuf_buffer, queue); if (need_init) *need_init = 1; front->curr_frame = vb; list_del_init(&vb->queue); } spin_unlock(&front->queue_lock); return !!vb; } /* check if the video's buffer is ready */ static bool get_video_frame(struct front_face *front, struct video_data *video) { int need_init = 0; bool ret = true; ret = get_frame(front, &need_init); if (ret && need_init) init_copy(video, 0); return ret; } static void submit_frame(struct front_face *front) { struct videobuf_buffer *vb = front->curr_frame; if (vb == NULL) return; front->curr_frame = NULL; vb->state = VIDEOBUF_DONE; vb->field_count++; 
do_gettimeofday(&vb->ts); wake_up(&vb->done); } /* * A frame is composed of two fields. If we receive all the two fields, * call the submit_frame() to submit the whole frame to applications. */ static void end_field(struct video_data *video) { /* logs(video->front); */ if (1 == video->field_count) submit_frame(video->front); else init_copy(video, 1); } static void copy_video_data(struct video_data *video, char *src, unsigned int count) { #define copy_data(len) \ do { \ if (++video->lines_copied > video->lines_per_field) \ goto overflow; \ memcpy(video->dst, src, len);\ video->dst += len + video->lines_size; \ src += len; \ count -= len; \ } while (0) while (count && count >= video->lines_size) { if (video->prev_left) { copy_data(video->prev_left); video->prev_left = 0; continue; } copy_data(video->lines_size); } if (count && count < video->lines_size) { memcpy(video->dst, src, count); video->prev_left = video->lines_size - count; video->dst += count; } return; overflow: end_field(video); } static void check_trailer(struct video_data *video, char *src, int count) { struct vbi_data *vbi = video->vbi; int offset; /* trailer's offset */ char *buf; offset = (video->context.pix.sizeimage / 2 + vbi->vbi_size / 2) - (vbi->copied + video->lines_size * video->lines_copied); if (video->prev_left) offset -= (video->lines_size - video->prev_left); if (offset > count || offset <= 0) goto short_package; buf = src + offset; /* trailer : (VFHS) + U32 + U32 + field_num */ if (!strncmp(buf, "VFHS", 4)) { int field_num = *((u32 *)(buf + 12)); if ((field_num & 1) ^ video->field_count) { init_copy(video, video->field_count); return; } copy_video_data(video, src, offset); } short_package: end_field(video); } /* ========== Check this more carefully! 
=========== */ static inline void copy_vbi_data(struct vbi_data *vbi, char *src, unsigned int count) { struct front_face *front = vbi->front; if (front && get_frame(front, NULL)) { char *buf = videobuf_to_vmalloc(front->curr_frame); if (vbi->video->field_count) buf += (vbi->vbi_size / 2); memcpy(buf + vbi->copied, src, count); } vbi->copied += count; } /* * Copy the normal data (VBI or VIDEO) without the trailer. * VBI is not interlaced, while VIDEO is interlaced. */ static inline void copy_vbi_video_data(struct video_data *video, char *src, unsigned int count) { struct vbi_data *vbi = video->vbi; unsigned int vbi_delta = (vbi->vbi_size / 2) - vbi->copied; if (vbi_delta >= count) { copy_vbi_data(vbi, src, count); } else { if (vbi_delta) { copy_vbi_data(vbi, src, vbi_delta); /* we receive the two fields of the VBI*/ if (vbi->front && video->field_count) submit_frame(vbi->front); } copy_video_data(video, src + vbi_delta, count - vbi_delta); } } static void urb_complete_bulk(struct urb *urb) { struct front_face *front = urb->context; struct video_data *video = &front->pd->video_data; char *src = (char *)urb->transfer_buffer; int count = urb->actual_length; int ret = 0; if (!video->is_streaming || urb->status) { if (urb->status == -EPROTO) goto resend_it; return; } if (!get_video_frame(front, video)) goto resend_it; if (count == urb->transfer_buffer_length) copy_vbi_video_data(video, src, count); else check_trailer(video, src, count); resend_it: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) log(" submit failed: error %d", ret); } /************************* for ISO *********************/ #define GET_SUCCESS (0) #define GET_TRAILER (1) #define GET_TOO_MUCH_BUBBLE (2) #define GET_NONE (3) static int get_chunk(int start, struct urb *urb, int *head, int *tail, int *bubble_err) { struct usb_iso_packet_descriptor *pkt = NULL; int ret = GET_SUCCESS; for (*head = *tail = -1; start < urb->number_of_packets; start++) { pkt = &urb->iso_frame_desc[start]; /* handle the bubble of 
the Hub */ if (-EOVERFLOW == pkt->status) { if (++*bubble_err > urb->number_of_packets / 3) return GET_TOO_MUCH_BUBBLE; continue; } /* This is the gap */ if (pkt->status || pkt->actual_length <= 0 || pkt->actual_length > ISO_PKT_SIZE) { if (*head != -1) break; continue; } /* a good isochronous packet */ if (pkt->actual_length == ISO_PKT_SIZE) { if (*head == -1) *head = start; *tail = start; continue; } /* trailer is here */ if (pkt->actual_length < ISO_PKT_SIZE) { if (*head == -1) { *head = start; *tail = start; return GET_TRAILER; } break; } } if (*head == -1 && *tail == -1) ret = GET_NONE; return ret; } /* * |__|------|___|-----|_______| * ^ ^ * | | * gap gap */ static void urb_complete_iso(struct urb *urb) { struct front_face *front = urb->context; struct video_data *video = &front->pd->video_data; int bubble_err = 0, head = 0, tail = 0; char *src = (char *)urb->transfer_buffer; int ret = 0; if (!video->is_streaming) return; do { if (!get_video_frame(front, video)) goto out; switch (get_chunk(head, urb, &head, &tail, &bubble_err)) { case GET_SUCCESS: copy_vbi_video_data(video, src + (head * ISO_PKT_SIZE), (tail - head + 1) * ISO_PKT_SIZE); break; case GET_TRAILER: check_trailer(video, src + (head * ISO_PKT_SIZE), ISO_PKT_SIZE); break; case GET_NONE: goto out; case GET_TOO_MUCH_BUBBLE: log("\t We got too much bubble"); schedule_work(&video->bubble_work); return; } } while (head = tail + 1, head < urb->number_of_packets); out: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) log("usb_submit_urb err : %d", ret); } /*============================= [ end ] =====================*/ static int prepare_iso_urb(struct video_data *video) { struct usb_device *udev = video->pd->udev; int i; if (video->urb_array[0]) return 0; for (i = 0; i < SBUF_NUM; i++) { struct urb *urb; void *mem; int j; urb = usb_alloc_urb(PK_PER_URB, GFP_KERNEL); if (urb == NULL) goto out; video->urb_array[i] = urb; mem = usb_alloc_coherent(udev, ISO_PKT_SIZE * PK_PER_URB, GFP_KERNEL, 
&urb->transfer_dma); urb->complete = urb_complete_iso; /* handler */ urb->dev = udev; urb->context = video->front; urb->pipe = usb_rcvisocpipe(udev, video->endpoint_addr); urb->interval = 1; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->number_of_packets = PK_PER_URB; urb->transfer_buffer = mem; urb->transfer_buffer_length = PK_PER_URB * ISO_PKT_SIZE; for (j = 0; j < PK_PER_URB; j++) { urb->iso_frame_desc[j].offset = ISO_PKT_SIZE * j; urb->iso_frame_desc[j].length = ISO_PKT_SIZE; } } return 0; out: for (; i > 0; i--) ; return -ENOMEM; } /* return the succeeded number of the allocation */ int alloc_bulk_urbs_generic(struct urb **urb_array, int num, struct usb_device *udev, u8 ep_addr, int buf_size, gfp_t gfp_flags, usb_complete_t complete_fn, void *context) { int i = 0; for (; i < num; i++) { void *mem; struct urb *urb = usb_alloc_urb(0, gfp_flags); if (urb == NULL) return i; mem = usb_alloc_coherent(udev, buf_size, gfp_flags, &urb->transfer_dma); if (mem == NULL) { usb_free_urb(urb); return i; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr), mem, buf_size, complete_fn, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb_array[i] = urb; } return i; } void free_all_urb_generic(struct urb **urb_array, int num) { int i; struct urb *urb; for (i = 0; i < num; i++) { urb = urb_array[i]; if (urb) { usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); urb_array[i] = NULL; } } } static int prepare_bulk_urb(struct video_data *video) { if (video->urb_array[0]) return 0; alloc_bulk_urbs_generic(video->urb_array, SBUF_NUM, video->pd->udev, video->endpoint_addr, 0x2000, GFP_KERNEL, urb_complete_bulk, video->front); return 0; } /* free the URBs */ static void free_all_urb(struct video_data *video) { free_all_urb_generic(video->urb_array, SBUF_NUM); } static void pd_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { videobuf_vmalloc_free(vb); vb->state = 
VIDEOBUF_NEEDS_INIT; } static void pd_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct front_face *front = q->priv_data; vb->state = VIDEOBUF_QUEUED; list_add_tail(&vb->queue, &front->active); } static int pd_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct front_face *front = q->priv_data; int rc; switch (front->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (VIDEOBUF_NEEDS_INIT == vb->state) { struct v4l2_pix_format *pix; pix = &front->pd->video_data.context.pix; vb->size = pix->sizeimage; /* real frame size */ vb->width = pix->width; vb->height = pix->height; rc = videobuf_iolock(q, vb, NULL); if (rc < 0) return rc; } break; case V4L2_BUF_TYPE_VBI_CAPTURE: if (VIDEOBUF_NEEDS_INIT == vb->state) { vb->size = front->pd->vbi_data.vbi_size; rc = videobuf_iolock(q, vb, NULL); if (rc < 0) return rc; } break; default: return -EINVAL; } vb->field = field; vb->state = VIDEOBUF_PREPARED; return 0; } static int fire_all_urb(struct video_data *video) { int i, ret; video->is_streaming = 1; for (i = 0; i < SBUF_NUM; i++) { ret = usb_submit_urb(video->urb_array[i], GFP_KERNEL); if (ret) log("(%d) failed: error %d", i, ret); } return ret; } static int start_video_stream(struct poseidon *pd) { struct video_data *video = &pd->video_data; s32 cmd_status; send_set_req(pd, TAKE_REQUEST, 0, &cmd_status); send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_START, &cmd_status); if (pd->cur_transfer_mode) { prepare_iso_urb(video); INIT_WORK(&video->bubble_work, iso_bubble_handler); } else { /* The bulk mode does not need a bubble handler */ prepare_bulk_urb(video); } fire_all_urb(video); return 0; } static int pd_buf_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct front_face *front = q->priv_data; struct poseidon *pd = front->pd; switch (front->type) { default: return -EINVAL; case V4L2_BUF_TYPE_VIDEO_CAPTURE: { struct video_data *video = &pd->video_data; struct v4l2_pix_format *pix = 
&video->context.pix; *size = PAGE_ALIGN(pix->sizeimage);/* page aligned frame size */ if (*count < 4) *count = 4; if (1) { /* same in different altersetting */ video->endpoint_addr = 0x82; video->vbi = &pd->vbi_data; video->vbi->video = video; video->pd = pd; video->lines_per_field = pix->height / 2; video->lines_size = pix->width * 2; video->front = front; } return start_video_stream(pd); } case V4L2_BUF_TYPE_VBI_CAPTURE: { struct vbi_data *vbi = &pd->vbi_data; *size = PAGE_ALIGN(vbi->vbi_size); log("size : %d", *size); if (*count == 0) *count = 4; } break; } return 0; } static struct videobuf_queue_ops pd_video_qops = { .buf_setup = pd_buf_setup, .buf_prepare = pd_buf_prepare, .buf_queue = pd_buf_queue, .buf_release = pd_buf_release, }; static int vidioc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) { if (ARRAY_SIZE(poseidon_formats) <= f->index) return -EINVAL; f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; f->flags = 0; f->pixelformat = poseidon_formats[f->index].fourcc; strcpy(f->description, poseidon_formats[f->index].name); return 0; } static int vidioc_g_fmt(struct file *file, void *fh, struct v4l2_format *f) { struct front_face *front = fh; struct poseidon *pd = front->pd; logs(front); f->fmt.pix = pd->video_data.context.pix; return 0; } static int vidioc_try_fmt(struct file *file, void *fh, struct v4l2_format *f) { return 0; } /* * VLC calls VIDIOC_S_STD before VIDIOC_S_FMT, while * Mplayer calls them in the reverse order. 
*/ static int pd_vidioc_s_fmt(struct poseidon *pd, struct v4l2_pix_format *pix) { struct video_data *video = &pd->video_data; struct running_context *context = &video->context; struct v4l2_pix_format *pix_def = &context->pix; s32 ret = 0, cmd_status = 0, vid_resol; /* set the pixel format to firmware */ if (pix->pixelformat == V4L2_PIX_FMT_RGB565) { vid_resol = TLG_TUNER_VID_FORMAT_RGB_565; } else { pix->pixelformat = V4L2_PIX_FMT_YUYV; vid_resol = TLG_TUNER_VID_FORMAT_YUV; } ret = send_set_req(pd, VIDEO_STREAM_FMT_SEL, vid_resol, &cmd_status); /* set the resolution to firmware */ vid_resol = TLG_TUNE_VID_RES_720; switch (pix->width) { case 704: vid_resol = TLG_TUNE_VID_RES_704; break; default: pix->width = 720; case 720: break; } ret |= send_set_req(pd, VIDEO_ROSOLU_SEL, vid_resol, &cmd_status); if (ret || cmd_status) return -EBUSY; pix_def->pixelformat = pix->pixelformat; /* save it */ pix->height = (context->tvnormid & V4L2_STD_525_60) ? 480 : 576; /* Compare with the default setting */ if ((pix_def->width != pix->width) || (pix_def->height != pix->height)) { pix_def->width = pix->width; pix_def->height = pix->height; pix_def->bytesperline = pix->width * 2; pix_def->sizeimage = pix->width * pix->height * 2; } *pix = *pix_def; return 0; } static int vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f) { struct front_face *front = fh; struct poseidon *pd = front->pd; logs(front); /* stop VBI here */ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != f->type) return -EINVAL; mutex_lock(&pd->lock); if (pd->file_for_stream == NULL) pd->file_for_stream = file; else if (file != pd->file_for_stream) { mutex_unlock(&pd->lock); return -EINVAL; } pd_vidioc_s_fmt(pd, &f->fmt.pix); mutex_unlock(&pd->lock); return 0; } static int vidioc_g_fmt_vbi(struct file *file, void *fh, struct v4l2_format *v4l2_f) { struct front_face *front = fh; struct poseidon *pd = front->pd; struct v4l2_vbi_format *vbi_fmt = &v4l2_f->fmt.vbi; vbi_fmt->samples_per_line = 720 * 2; vbi_fmt->sampling_rate = 
6750000 * 4; vbi_fmt->sample_format = V4L2_PIX_FMT_GREY; vbi_fmt->offset = 64 * 4; /*FIXME: why offset */ if (pd->video_data.context.tvnormid & V4L2_STD_525_60) { vbi_fmt->start[0] = 10; vbi_fmt->start[1] = 264; vbi_fmt->count[0] = V4L_NTSC_VBI_LINES; vbi_fmt->count[1] = V4L_NTSC_VBI_LINES; } else { vbi_fmt->start[0] = 6; vbi_fmt->start[1] = 314; vbi_fmt->count[0] = V4L_PAL_VBI_LINES; vbi_fmt->count[1] = V4L_PAL_VBI_LINES; } vbi_fmt->flags = V4L2_VBI_UNSYNC; logs(front); return 0; } static int set_std(struct poseidon *pd, v4l2_std_id *norm) { struct video_data *video = &pd->video_data; struct vbi_data *vbi = &pd->vbi_data; struct running_context *context; struct v4l2_pix_format *pix; s32 i, ret = 0, cmd_status, param; int height; for (i = 0; i < POSEIDON_TVNORMS; i++) { if (*norm & poseidon_tvnorms[i].v4l2_id) { param = poseidon_tvnorms[i].tlg_tvnorm; log("name : %s", poseidon_tvnorms[i].name); goto found; } } return -EINVAL; found: mutex_lock(&pd->lock); ret = send_set_req(pd, VIDEO_STD_SEL, param, &cmd_status); if (ret || cmd_status) goto out; /* Set vbi size and check the height of the frame */ context = &video->context; context->tvnormid = poseidon_tvnorms[i].v4l2_id; if (context->tvnormid & V4L2_STD_525_60) { vbi->vbi_size = V4L_NTSC_VBI_FRAMESIZE; height = 480; } else { vbi->vbi_size = V4L_PAL_VBI_FRAMESIZE; height = 576; } pix = &context->pix; if (pix->height != height) { pix->height = height; pix->sizeimage = pix->width * pix->height * 2; } out: mutex_unlock(&pd->lock); return ret; } static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm) { struct front_face *front = fh; logs(front); return set_std(front->pd, norm); } static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *in) { struct front_face *front = fh; if (in->index < 0 || in->index >= POSEIDON_INPUTS) return -EINVAL; strcpy(in->name, pd_inputs[in->index].name); in->type = V4L2_INPUT_TYPE_TUNER; /* * the audio input index mixed with this video input, * Poseidon 
only have one audio/video, set to "0" */ in->audioset = 0; in->tuner = 0; in->std = V4L2_STD_ALL; in->status = 0; logs(front); return 0; } static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) { struct front_face *front = fh; struct poseidon *pd = front->pd; struct running_context *context = &pd->video_data.context; logs(front); *i = context->sig_index; return 0; } /* We can support several inputs */ static int vidioc_s_input(struct file *file, void *fh, unsigned int i) { struct front_face *front = fh; struct poseidon *pd = front->pd; s32 ret, cmd_status; if (i < 0 || i >= POSEIDON_INPUTS) return -EINVAL; ret = send_set_req(pd, SGNL_SRC_SEL, pd_inputs[i].tlg_src, &cmd_status); if (ret) return ret; pd->video_data.context.sig_index = i; return 0; } static struct poseidon_control *check_control_id(__u32 id) { struct poseidon_control *control = &controls[0]; int array_size = ARRAY_SIZE(controls); for (; control < &controls[array_size]; control++) if (control->v4l2_ctrl.id == id) return control; return NULL; } static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *a) { struct poseidon_control *control = NULL; control = check_control_id(a->id); if (!control) return -EINVAL; *a = control->v4l2_ctrl; return 0; } static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl) { struct front_face *front = fh; struct poseidon *pd = front->pd; struct poseidon_control *control = NULL; struct tuner_custom_parameter_s tuner_param; s32 ret = 0, cmd_status; control = check_control_id(ctrl->id); if (!control) return -EINVAL; mutex_lock(&pd->lock); ret = send_get_req(pd, TUNER_CUSTOM_PARAMETER, control->vc_id, &tuner_param, &cmd_status, sizeof(tuner_param)); mutex_unlock(&pd->lock); if (ret || cmd_status) return -1; ctrl->value = tuner_param.param_value; return 0; } static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a) { struct tuner_custom_parameter_s param = {0}; struct poseidon_control *control = 
NULL; struct front_face *front = fh; struct poseidon *pd = front->pd; s32 ret = 0, cmd_status, params; control = check_control_id(a->id); if (!control) return -EINVAL; param.param_value = a->value; param.param_id = control->vc_id; params = *(s32 *)&param; /* temp code */ mutex_lock(&pd->lock); ret = send_set_req(pd, TUNER_CUSTOM_PARAMETER, params, &cmd_status); ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status); mutex_unlock(&pd->lock); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/4); return ret; } /* Audio ioctls */ static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a) { if (0 != a->index) return -EINVAL; a->capability = V4L2_AUDCAP_STEREO; strcpy(a->name, "USB audio in"); /*Poseidon have no AVL function.*/ a->mode = 0; return 0; } static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a) { a->index = 0; a->capability = V4L2_AUDCAP_STEREO; strcpy(a->name, "USB audio in"); a->mode = 0; return 0; } static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a) { return (0 == a->index) ? 
0 : -EINVAL; } /* Tuner ioctls */ static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *tuner) { struct front_face *front = fh; struct poseidon *pd = front->pd; struct tuner_atv_sig_stat_s atv_stat; s32 count = 5, ret, cmd_status; int index; if (0 != tuner->index) return -EINVAL; mutex_lock(&pd->lock); ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_ANALOG_TV, &atv_stat, &cmd_status, sizeof(atv_stat)); while (atv_stat.sig_lock_busy && count-- && !ret) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ); ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_ANALOG_TV, &atv_stat, &cmd_status, sizeof(atv_stat)); } mutex_unlock(&pd->lock); if (debug_mode) log("P:%d,S:%d", atv_stat.sig_present, atv_stat.sig_strength); if (ret || cmd_status) tuner->signal = 0; else if (atv_stat.sig_present && !atv_stat.sig_strength) tuner->signal = 0xFFFF; else tuner->signal = (atv_stat.sig_strength * 255 / 10) << 8; strcpy(tuner->name, "Telegent Systems"); tuner->type = V4L2_TUNER_ANALOG_TV; tuner->rangelow = TUNER_FREQ_MIN / 62500; tuner->rangehigh = TUNER_FREQ_MAX / 62500; tuner->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; index = pd->video_data.context.audio_idx; tuner->rxsubchans = pd_audio_modes[index].v4l2_audio_sub; tuner->audmode = pd_audio_modes[index].v4l2_audio_mode; tuner->afc = 0; logs(front); return 0; } static int pd_vidioc_s_tuner(struct poseidon *pd, int index) { s32 ret = 0, cmd_status, param, audiomode; mutex_lock(&pd->lock); param = pd_audio_modes[index].tlg_audio_mode; ret = send_set_req(pd, TUNER_AUD_MODE, param, &cmd_status); audiomode = get_audio_std(pd->video_data.context.tvnormid); ret |= send_set_req(pd, TUNER_AUD_ANA_STD, audiomode, &cmd_status); if (!ret) pd->video_data.context.audio_idx = index; mutex_unlock(&pd->lock); return ret; } static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *a) { struct front_face *front = fh; struct poseidon *pd = front->pd; int 
index; if (0 != a->index) return -EINVAL; logs(front); for (index = 0; index < POSEIDON_AUDIOMODS; index++) if (a->audmode == pd_audio_modes[index].v4l2_audio_mode) return pd_vidioc_s_tuner(pd, index); return -EINVAL; } static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *freq) { struct front_face *front = fh; struct poseidon *pd = front->pd; struct running_context *context = &pd->video_data.context; if (0 != freq->tuner) return -EINVAL; freq->frequency = context->freq; freq->type = V4L2_TUNER_ANALOG_TV; return 0; } static int set_frequency(struct poseidon *pd, __u32 frequency) { s32 ret = 0, param, cmd_status; struct running_context *context = &pd->video_data.context; param = frequency * 62500 / 1000; if (param < TUNER_FREQ_MIN/1000 || param > TUNER_FREQ_MAX / 1000) return -EINVAL; mutex_lock(&pd->lock); ret = send_set_req(pd, TUNE_FREQ_SELECT, param, &cmd_status); ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status); msleep(250); /* wait for a while until the hardware is ready. 
*/ context->freq = frequency; mutex_unlock(&pd->lock); return ret; } static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency *freq) { struct front_face *front = fh; struct poseidon *pd = front->pd; logs(front); #ifdef CONFIG_PM pd->pm_suspend = pm_video_suspend; pd->pm_resume = pm_video_resume; #endif return set_frequency(pd, freq->frequency); } static int vidioc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b) { struct front_face *front = file->private_data; logs(front); return videobuf_reqbufs(&front->q, b); } static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct front_face *front = file->private_data; logs(front); return videobuf_querybuf(&front->q, b); } static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct front_face *front = file->private_data; return videobuf_qbuf(&front->q, b); } static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct front_face *front = file->private_data; return videobuf_dqbuf(&front->q, b, file->f_flags & O_NONBLOCK); } /* Just stop the URBs, do not free the URBs */ static int usb_transfer_stop(struct video_data *video) { if (video->is_streaming) { int i; s32 cmd_status; struct poseidon *pd = video->pd; video->is_streaming = 0; for (i = 0; i < SBUF_NUM; ++i) { if (video->urb_array[i]) usb_kill_urb(video->urb_array[i]); } send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP, &cmd_status); } return 0; } int stop_all_video_stream(struct poseidon *pd) { struct video_data *video = &pd->video_data; struct vbi_data *vbi = &pd->vbi_data; mutex_lock(&pd->lock); if (video->is_streaming) { struct front_face *front = video->front; /* stop the URBs */ usb_transfer_stop(video); free_all_urb(video); /* stop the host side of VIDEO */ videobuf_stop(&front->q); videobuf_mmap_free(&front->q); /* stop the host side of VBI */ front = vbi->front; if (front) { videobuf_stop(&front->q); videobuf_mmap_free(&front->q); } } 
mutex_unlock(&pd->lock); return 0; } /* * The bubbles can seriously damage the video's quality, * though it occurs in very rare situation. */ static void iso_bubble_handler(struct work_struct *w) { struct video_data *video; struct poseidon *pd; video = container_of(w, struct video_data, bubble_work); pd = video->pd; mutex_lock(&pd->lock); usb_transfer_stop(video); msleep(500); start_video_stream(pd); mutex_unlock(&pd->lock); } static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct front_face *front = fh; logs(front); if (unlikely(type != front->type)) return -EINVAL; return videobuf_streamon(&front->q); } static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct front_face *front = file->private_data; logs(front); if (unlikely(type != front->type)) return -EINVAL; return videobuf_streamoff(&front->q); } /* Set the firmware's default values : need altersetting */ static int pd_video_checkmode(struct poseidon *pd) { s32 ret = 0, cmd_status, audiomode; set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/2); /* choose the altersetting */ ret = usb_set_interface(pd->udev, 0, (pd->cur_transfer_mode ? 
ISO_3K_BULK_ALTERNATE_IFACE : BULK_ALTERNATE_IFACE)); if (ret < 0) goto error; /* set default parameters for PAL-D , with the VBI enabled*/ ret = set_tuner_mode(pd, TLG_MODE_ANALOG_TV); ret |= send_set_req(pd, SGNL_SRC_SEL, TLG_SIG_SRC_ANTENNA, &cmd_status); ret |= send_set_req(pd, VIDEO_STD_SEL, TLG_TUNE_VSTD_PAL_D, &cmd_status); ret |= send_set_req(pd, VIDEO_STREAM_FMT_SEL, TLG_TUNER_VID_FORMAT_YUV, &cmd_status); ret |= send_set_req(pd, VIDEO_ROSOLU_SEL, TLG_TUNE_VID_RES_720, &cmd_status); ret |= send_set_req(pd, TUNE_FREQ_SELECT, TUNER_FREQ_MIN, &cmd_status); ret |= send_set_req(pd, VBI_DATA_SEL, 1, &cmd_status);/* enable vbi */ /* set the audio */ audiomode = get_audio_std(pd->video_data.context.tvnormid); ret |= send_set_req(pd, TUNER_AUD_ANA_STD, audiomode, &cmd_status); ret |= send_set_req(pd, TUNER_AUD_MODE, TLG_TUNE_TVAUDIO_MODE_STEREO, &cmd_status); ret |= send_set_req(pd, AUDIO_SAMPLE_RATE_SEL, ATV_AUDIO_RATE_48K, &cmd_status); error: return ret; } #ifdef CONFIG_PM static int pm_video_suspend(struct poseidon *pd) { /* stop audio */ pm_alsa_suspend(pd); /* stop and free all the URBs */ usb_transfer_stop(&pd->video_data); free_all_urb(&pd->video_data); /* reset the interface */ usb_set_interface(pd->udev, 0, 0); msleep(300); return 0; } static int restore_v4l2_context(struct poseidon *pd, struct running_context *context) { struct front_face *front = pd->video_data.front; pd_video_checkmode(pd); set_std(pd, &context->tvnormid); vidioc_s_input(NULL, front, context->sig_index); pd_vidioc_s_tuner(pd, context->audio_idx); pd_vidioc_s_fmt(pd, &context->pix); set_frequency(pd, context->freq); return 0; } static int pm_video_resume(struct poseidon *pd) { struct video_data *video = &pd->video_data; /* resume the video */ /* [1] restore the origin V4L2 parameters */ restore_v4l2_context(pd, &video->context); /* [2] initiate video copy variables */ if (video->front->curr_frame) init_copy(video, 0); /* [3] fire urbs */ start_video_stream(pd); /* resume the audio */ 
pm_alsa_resume(pd); return 0; } #endif void set_debug_mode(struct video_device *vfd, int debug_mode) { vfd->debug = 0; if (debug_mode & 0x1) vfd->debug = V4L2_DEBUG_IOCTL; if (debug_mode & 0x2) vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG; } static void init_video_context(struct running_context *context) { context->sig_index = 0; context->audio_idx = 1; /* stereo */ context->tvnormid = V4L2_STD_PAL_D; context->pix = (struct v4l2_pix_format) { .width = 720, .height = 576, .pixelformat = V4L2_PIX_FMT_YUYV, .field = V4L2_FIELD_INTERLACED, .bytesperline = 720 * 2, .sizeimage = 720 * 576 * 2, .colorspace = V4L2_COLORSPACE_SMPTE170M, .priv = 0 }; } static int pd_video_open(struct file *file) { struct video_device *vfd = video_devdata(file); struct poseidon *pd = video_get_drvdata(vfd); struct front_face *front = NULL; int ret = -ENOMEM; mutex_lock(&pd->lock); usb_autopm_get_interface(pd->interface); if (vfd->vfl_type == VFL_TYPE_GRABBER && !(pd->state & POSEIDON_STATE_ANALOG)) { front = kzalloc(sizeof(struct front_face), GFP_KERNEL); if (!front) goto out; pd->cur_transfer_mode = usb_transfer_mode;/* bulk or iso */ init_video_context(&pd->video_data.context); ret = pd_video_checkmode(pd); if (ret < 0) { kfree(front); ret = -1; goto out; } pd->state |= POSEIDON_STATE_ANALOG; front->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; pd->video_data.users++; set_debug_mode(vfd, debug_mode); videobuf_queue_vmalloc_init(&front->q, &pd_video_qops, NULL, &front->queue_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,/* video is interlacd */ sizeof(struct videobuf_buffer),/*it's enough*/ front, NULL); } else if (vfd->vfl_type == VFL_TYPE_VBI && !(pd->state & POSEIDON_STATE_VBI)) { front = kzalloc(sizeof(struct front_face), GFP_KERNEL); if (!front) goto out; pd->state |= POSEIDON_STATE_VBI; front->type = V4L2_BUF_TYPE_VBI_CAPTURE; pd->vbi_data.front = front; pd->vbi_data.users++; videobuf_queue_vmalloc_init(&front->q, &pd_video_qops, NULL, &front->queue_lock, 
V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_NONE, /* vbi is NONE mode */ sizeof(struct videobuf_buffer), front, NULL); } else { /* maybe add FM support here */ log("other "); ret = -EINVAL; goto out; } front->pd = pd; front->curr_frame = NULL; INIT_LIST_HEAD(&front->active); spin_lock_init(&front->queue_lock); file->private_data = front; kref_get(&pd->kref); mutex_unlock(&pd->lock); return 0; out: usb_autopm_put_interface(pd->interface); mutex_unlock(&pd->lock); return ret; } static int pd_video_release(struct file *file) { struct front_face *front = file->private_data; struct poseidon *pd = front->pd; s32 cmd_status = 0; logs(front); mutex_lock(&pd->lock); if (front->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { pd->state &= ~POSEIDON_STATE_ANALOG; /* stop the device, and free the URBs */ usb_transfer_stop(&pd->video_data); free_all_urb(&pd->video_data); /* stop the firmware */ send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP, &cmd_status); pd->file_for_stream = NULL; pd->video_data.users--; } else if (front->type == V4L2_BUF_TYPE_VBI_CAPTURE) { pd->state &= ~POSEIDON_STATE_VBI; pd->vbi_data.front = NULL; pd->vbi_data.users--; } videobuf_stop(&front->q); videobuf_mmap_free(&front->q); usb_autopm_put_interface(pd->interface); mutex_unlock(&pd->lock); kfree(front); file->private_data = NULL; kref_put(&pd->kref, poseidon_delete); return 0; } static int pd_video_mmap(struct file *file, struct vm_area_struct *vma) { struct front_face *front = file->private_data; return videobuf_mmap_mapper(&front->q, vma); } static unsigned int pd_video_poll(struct file *file, poll_table *table) { struct front_face *front = file->private_data; return videobuf_poll_stream(file, &front->q, table); } static ssize_t pd_video_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct front_face *front = file->private_data; return videobuf_read_stream(&front->q, buffer, count, ppos, 0, file->f_flags & O_NONBLOCK); } /* This struct works for both VIDEO and VBI */ static const 
struct v4l2_file_operations pd_video_fops = { .owner = THIS_MODULE, .open = pd_video_open, .release = pd_video_release, .read = pd_video_read, .poll = pd_video_poll, .mmap = pd_video_mmap, .ioctl = video_ioctl2, /* maybe changed in future */ }; static const struct v4l2_ioctl_ops pd_video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, /* Video format */ .vidioc_g_fmt_vid_cap = vidioc_g_fmt, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt, .vidioc_s_fmt_vid_cap = vidioc_s_fmt, .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi, /* VBI */ .vidioc_try_fmt_vid_cap = vidioc_try_fmt, /* Input */ .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_enum_input = vidioc_enum_input, /* Audio ioctls */ .vidioc_enumaudio = vidioc_enumaudio, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, /* Tuner ioctls */ .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_s_std = vidioc_s_std, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, /* Buffer handlers */ .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, /* Stream on/off */ .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, /* Control handling */ .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, }; static struct video_device pd_video_template = { .name = "Telegent-Video", .fops = &pd_video_fops, .minor = -1, .release = video_device_release, .tvnorms = V4L2_STD_ALL, .ioctl_ops = &pd_video_ioctl_ops, }; struct video_device *vdev_init(struct poseidon *pd, struct video_device *tmp) { struct video_device *vfd; vfd = video_device_alloc(); if (vfd == NULL) return NULL; *vfd = *tmp; vfd->minor = -1; vfd->v4l2_dev = &pd->v4l2_dev; /*vfd->parent = &(pd->udev->dev); */ vfd->release = video_device_release; video_set_drvdata(vfd, pd); return vfd; } void destroy_video_device(struct video_device **v_dev) { 
struct video_device *dev = *v_dev; if (dev == NULL) return; if (video_is_registered(dev)) video_unregister_device(dev); else video_device_release(dev); *v_dev = NULL; } void pd_video_exit(struct poseidon *pd) { struct video_data *video = &pd->video_data; struct vbi_data *vbi = &pd->vbi_data; destroy_video_device(&video->v_dev); destroy_video_device(&vbi->v_dev); log(); } int pd_video_init(struct poseidon *pd) { struct video_data *video = &pd->video_data; struct vbi_data *vbi = &pd->vbi_data; int ret = -ENOMEM; video->v_dev = vdev_init(pd, &pd_video_template); if (video->v_dev == NULL) goto out; ret = video_register_device(video->v_dev, VFL_TYPE_GRABBER, -1); if (ret != 0) goto out; /* VBI uses the same template as video */ vbi->v_dev = vdev_init(pd, &pd_video_template); if (vbi->v_dev == NULL) { ret = -ENOMEM; goto out; } ret = video_register_device(vbi->v_dev, VFL_TYPE_VBI, -1); if (ret != 0) goto out; log("register VIDEO/VBI devices"); return 0; out: log("VIDEO/VBI devices register failed, : %d", ret); pd_video_exit(pd); return ret; }
gpl-2.0
fefifofum/android_kernel_bq_maxwell2plus_3.0.8
drivers/media/video/cx18/cx18-av-core.c
8340
41291
/* * cx18 ADEC audio functions * * Derived from cx25840-core.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <media/v4l2-chip-ident.h> #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" int cx18_av_write(struct cx18 *cx, u16 addr, u8 value) { u32 reg = 0xc40000 + (addr & ~3); u32 mask = 0xff; int shift = (addr & 3) * 8; u32 x = cx18_read_reg(cx, reg); x = (x & ~(mask << shift)) | ((u32)value << shift); cx18_write_reg(cx, x, reg); return 0; } int cx18_av_write_expect(struct cx18 *cx, u16 addr, u8 value, u8 eval, u8 mask) { u32 reg = 0xc40000 + (addr & ~3); int shift = (addr & 3) * 8; u32 x = cx18_read_reg(cx, reg); x = (x & ~((u32)0xff << shift)) | ((u32)value << shift); cx18_write_reg_expect(cx, x, reg, ((u32)eval << shift), ((u32)mask << shift)); return 0; } int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value) { cx18_write_reg(cx, value, 0xc40000 + addr); return 0; } int cx18_av_write4_expect(struct cx18 *cx, u16 addr, u32 value, u32 eval, u32 mask) { cx18_write_reg_expect(cx, value, 0xc40000 + addr, eval, mask); return 0; } int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value) { cx18_write_reg_noretry(cx, value, 0xc40000 + addr); return 0; } u8 
cx18_av_read(struct cx18 *cx, u16 addr) { u32 x = cx18_read_reg(cx, 0xc40000 + (addr & ~3)); int shift = (addr & 3) * 8; return (x >> shift) & 0xff; } u32 cx18_av_read4(struct cx18 *cx, u16 addr) { return cx18_read_reg(cx, 0xc40000 + addr); } int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask, u8 or_value) { return cx18_av_write(cx, addr, (cx18_av_read(cx, addr) & and_mask) | or_value); } int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask, u32 or_value) { return cx18_av_write4(cx, addr, (cx18_av_read4(cx, addr) & and_mask) | or_value); } static void cx18_av_init(struct cx18 *cx) { /* * The crystal freq used in calculations in this driver will be * 28.636360 MHz. * Aim to run the PLLs' VCOs near 400 MHz to minimze errors. */ /* * VDCLK Integer = 0x0f, Post Divider = 0x04 * AIMCLK Integer = 0x0e, Post Divider = 0x16 */ cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f); /* VDCLK Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */ cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe); /* AIMCLK Fraction = 0x05227ad */ /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/ cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */ cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56); } static void cx18_av_initialize(struct v4l2_subdev *sd) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); int default_volume; u32 v; cx18_av_loadfw(cx); /* Stop 8051 code execution */ cx18_av_write4_expect(cx, CXADEC_DL_CTL, 0x03000000, 0x03000000, 0x13000000); /* initallize the PLL by toggling sleep bit */ v = cx18_av_read4(cx, CXADEC_HOST_REG1); /* enable sleep mode - register appears to be read only... 
*/ cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v | 1, v, 0xfffe); /* disable sleep mode */ cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v & 0xfffe, v & 0xfffe, 0xffff); /* initialize DLLs */ v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF; /* disable FLD */ cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v); /* enable FLD */ cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v | 0x10000100); v = cx18_av_read4(cx, CXADEC_DLL2_DIAG_CTRL) & 0xE1FFFEFF; /* disable FLD */ cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v); /* enable FLD */ cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v | 0x06000100); /* set analog bias currents. Set Vreg to 1.20V. */ cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL1, 0x000A1802); v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1; /* enable TUNE_FIL_RST */ cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3, v, v, 0x03009F0F); /* disable TUNE_FIL_RST */ cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3, v & 0xFFFFFFFE, v & 0xFFFFFFFE, 0x03009F0F); /* enable 656 output */ cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00); /* video output drive strength */ cx18_av_and_or4(cx, CXADEC_PIN_CTRL2, ~0, 0x2); /* reset video */ cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000); cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0); /* * Disable Video Auto-config of the Analog Front End and Video PLL. * * Since we only use BT.656 pixel mode, which works for both 525 and 625 * line systems, it's just easier for us to set registers * 0x102 (CXADEC_CHIP_CTRL), 0x104-0x106 (CXADEC_AFE_CTRL), * 0x108-0x109 (CXADEC_PLL_CTRL1), and 0x10c-0x10f (CXADEC_VID_PLL_FRAC) * ourselves, than to run around cleaning up after the auto-config. * * (Note: my CX23418 chip doesn't seem to let the ACFG_DIS bit * get set to 1, but OTOH, it doesn't seem to do AFE and VID PLL * autoconfig either.) * * As a default, also turn off Dual mode for ADC2 and set ADC2 to CH3. 
*/ cx18_av_and_or4(cx, CXADEC_CHIP_CTRL, 0xFFFBFFFF, 0x00120000); /* Setup the Video and and Aux/Audio PLLs */ cx18_av_init(cx); /* set video to auto-detect */ /* Clear bits 11-12 to enable slow locking mode. Set autodetect mode */ /* set the comb notch = 1 */ cx18_av_and_or4(cx, CXADEC_MODE_CTRL, 0xFFF7E7F0, 0x02040800); /* Enable wtw_en in CRUSH_CTRL (Set bit 22) */ /* Enable maj_sel in CRUSH_CTRL (Set bit 20) */ cx18_av_and_or4(cx, CXADEC_CRUSH_CTRL, ~0, 0x00500000); /* Set VGA_TRACK_RANGE to 0x20 */ cx18_av_and_or4(cx, CXADEC_DFE_CTRL2, 0xFFFF00FF, 0x00002000); /* * Initial VBI setup * VIP-1.1, 10 bit mode, enable Raw, disable sliced, * don't clamp raw samples when codes are in use, 1 byte user D-words, * IDID0 has line #, RP code V bit transition on VBLANK, data during * blanking intervals */ cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4013252e); /* Set the video input. The setting in MODE_CTRL gets lost when we do the above setup */ /* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */ /* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */ /* * Analog Front End (AFE) * Default to luma on ch1/ADC1, chroma on ch2/ADC2, SIF on ch3/ADC2 * bypass_ch[1-3] use filter * droop_comp_ch[1-3] disable * clamp_en_ch[1-3] disable * aud_in_sel ADC2 * luma_in_sel ADC1 * chroma_in_sel ADC2 * clamp_sel_ch[2-3] midcode * clamp_sel_ch1 video decoder * vga_sel_ch3 audio decoder * vga_sel_ch[1-2] video decoder * half_bw_ch[1-3] disable * +12db_ch[1-3] disable */ cx18_av_and_or4(cx, CXADEC_AFE_CTRL, 0xFF000000, 0x00005D00); /* if(dwEnable && dw3DCombAvailable) { */ /* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */ /* } else { */ /* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x6628021F); */ /* } */ cx18_av_write4(cx, CXADEC_SRC_COMB_CFG, 0x6628021F); default_volume = cx18_av_read(cx, 0x8d4); /* * Enforce the legacy volume scale mapping limits to avoid * -ERANGE errors when initializing the volume control */ if (default_volume > 228) { /* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */ 
default_volume = 228; cx18_av_write(cx, 0x8d4, 228); } else if (default_volume < 20) { /* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */ default_volume = 20; cx18_av_write(cx, 0x8d4, 20); } default_volume = (((228 - default_volume) >> 1) + 23) << 9; state->volume->cur.val = state->volume->default_value = default_volume; v4l2_ctrl_handler_setup(&state->hdl); } static int cx18_av_reset(struct v4l2_subdev *sd, u32 val) { cx18_av_initialize(sd); return 0; } static int cx18_av_load_fw(struct v4l2_subdev *sd) { struct cx18_av_state *state = to_cx18_av_state(sd); if (!state->is_initialized) { /* initialize on first use */ state->is_initialized = 1; cx18_av_initialize(sd); } return 0; } void cx18_av_std_setup(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; v4l2_std_id std = state->std; /* * Video ADC crystal clock to pixel clock SRC decimation ratio * 28.636360 MHz/13.5 Mpps * 256 = 0x21f.07b */ const int src_decimation = 0x21f; int hblank, hactive, burst, vblank, vactive, sc; int vblank656; int luma_lpf, uv_lpf, comb; u32 pll_int, pll_frac, pll_post; /* datasheet startup, step 8d */ if (std & ~V4L2_STD_NTSC) cx18_av_write(cx, 0x49f, 0x11); else cx18_av_write(cx, 0x49f, 0x14); /* * Note: At the end of a field, there are 3 sets of half line duration * (double horizontal rate) pulses: * * 5 (625) or 6 (525) half-lines to blank for the vertical retrace * 5 (625) or 6 (525) vertical sync pulses of half line duration * 5 (625) or 6 (525) half-lines of equalization pulses */ if (std & V4L2_STD_625_50) { /* * The following relationships of half line counts should hold: * 625 = vblank656 + vactive * 10 = vblank656 - vblank = vsync pulses + equalization pulses * * vblank656: half lines after line 625/mid-313 of blanked video * vblank: half lines, after line 5/317, of blanked video * vactive: half lines of active video + * 5 half lines after the end of active video * * As far as I can tell: * vblank656 starts counting from the 
falling edge of the first * vsync pulse (start of line 1 or mid-313) * vblank starts counting from the after the 5 vsync pulses and * 5 or 4 equalization pulses (start of line 6 or 318) * * For 625 line systems the driver will extract VBI information * from lines 6-23 and lines 318-335 (but the slicer can only * handle 17 lines, not the 18 in the vblank region). * In addition, we need vblank656 and vblank to be one whole * line longer, to cover line 24 and 336, so the SAV/EAV RP * codes get generated such that the encoder can actually * extract line 23 & 335 (WSS). We'll lose 1 line in each field * at the top of the screen. * * It appears the 5 half lines that happen after active * video must be included in vactive (579 instead of 574), * otherwise the colors get badly displayed in various regions * of the screen. I guess the chroma comb filter gets confused * without them (at least when a PVR-350 is the PAL source). */ vblank656 = 48; /* lines 1 - 24 & 313 - 336 */ vblank = 38; /* lines 6 - 24 & 318 - 336 */ vactive = 579; /* lines 24 - 313 & 337 - 626 */ /* * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is * is 864 pixels = 720 active + 144 blanking. ITU-R BT.601 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after * the end of active video to start a horizontal line, so that * leaves 132 pixels of hblank to ignore. 
*/ hblank = 132; hactive = 720; /* * Burst gate delay (for 625 line systems) * Hsync leading edge to color burst rise = 5.6 us * Color burst width = 2.25 us * Gate width = 4 pixel clocks * (5.6 us + 2.25/2 us) * 13.5 Mpps + 4/2 clocks = 92.79 clocks */ burst = 93; luma_lpf = 2; if (std & V4L2_STD_PAL) { uv_lpf = 1; comb = 0x20; /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */ sc = 688700; } else if (std == V4L2_STD_PAL_Nc) { uv_lpf = 1; comb = 0x20; /* sc = 3582056.25 * src_decimation/28636360 * 2^13 */ sc = 556422; } else { /* SECAM */ uv_lpf = 0; comb = 0; /* (fr + fb)/2 = (4406260 + 4250000)/2 = 4328130 */ /* sc = 4328130 * src_decimation/28636360 * 2^13 */ sc = 672314; } } else { /* * The following relationships of half line counts should hold: * 525 = prevsync + vblank656 + vactive * 12 = vblank656 - vblank = vsync pulses + equalization pulses * * prevsync: 6 half-lines before the vsync pulses * vblank656: half lines, after line 3/mid-266, of blanked video * vblank: half lines, after line 9/272, of blanked video * vactive: half lines of active video * * As far as I can tell: * vblank656 starts counting from the falling edge of the first * vsync pulse (start of line 4 or mid-266) * vblank starts counting from the after the 6 vsync pulses and * 6 or 5 equalization pulses (start of line 10 or 272) * * For 525 line systems the driver will extract VBI information * from lines 10-21 and lines 273-284. */ vblank656 = 38; /* lines 4 - 22 & 266 - 284 */ vblank = 26; /* lines 10 - 22 & 272 - 284 */ vactive = 481; /* lines 23 - 263 & 285 - 525 */ /* * For a 13.5 Mpps clock and 15,734.26 Hz line rate, a line is * is 858 pixels = 720 active + 138 blanking. The Hsync leading * edge should happen 1.2 us * 13.5 Mpps ~= 16 pixels after the * end of active video, leaving 122 pixels of hblank to ignore * before active video starts. 
*/ hactive = 720; hblank = 122; luma_lpf = 1; uv_lpf = 1; /* * Burst gate delay (for 525 line systems) * Hsync leading edge to color burst rise = 5.3 us * Color burst width = 2.5 us * Gate width = 4 pixel clocks * (5.3 us + 2.5/2 us) * 13.5 Mpps + 4/2 clocks = 90.425 clocks */ if (std == V4L2_STD_PAL_60) { burst = 90; luma_lpf = 2; comb = 0x20; /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */ sc = 688700; } else if (std == V4L2_STD_PAL_M) { /* The 97 needs to be verified against PAL-M timings */ burst = 97; comb = 0x20; /* sc = 3575611.49 * src_decimation/28636360 * 2^13 */ sc = 555421; } else { burst = 90; comb = 0x66; /* sc = 3579545.45.. * src_decimation/28636360 * 2^13 */ sc = 556032; } } /* DEBUG: Displays configured PLL frequency */ pll_int = cx18_av_read(cx, 0x108); pll_frac = cx18_av_read4(cx, 0x10c) & 0x1ffffff; pll_post = cx18_av_read(cx, 0x109); CX18_DEBUG_INFO_DEV(sd, "PLL regs = int: %u, frac: %u, post: %u\n", pll_int, pll_frac, pll_post); if (pll_post) { int fsc, pll; u64 tmp; pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25; pll /= pll_post; CX18_DEBUG_INFO_DEV(sd, "Video PLL = %d.%06d MHz\n", pll / 1000000, pll % 1000000); CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n", pll / 8000000, (pll / 8) % 1000000); CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio " "= %d.%03d\n", src_decimation / 256, ((src_decimation % 256) * 1000) / 256); tmp = 28636360 * (u64) sc; do_div(tmp, src_decimation); fsc = tmp >> 13; CX18_DEBUG_INFO_DEV(sd, "Chroma sub-carrier initial freq = %d.%06d " "MHz\n", fsc / 1000000, fsc % 1000000); CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, " "vactive %i, vblank656 %i, src_dec %i, " "burst 0x%02x, luma_lpf %i, uv_lpf %i, " "comb 0x%02x, sc 0x%06x\n", hblank, hactive, vblank, vactive, vblank656, src_decimation, burst, luma_lpf, uv_lpf, comb, sc); } /* Sets horizontal blanking delay and active lines */ cx18_av_write(cx, 0x470, hblank); cx18_av_write(cx, 0x471, 0xff & (((hblank 
>> 8) & 0x3) | (hactive << 4))); cx18_av_write(cx, 0x472, hactive >> 4); /* Sets burst gate delay */ cx18_av_write(cx, 0x473, burst); /* Sets vertical blanking delay and active duration */ cx18_av_write(cx, 0x474, vblank); cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) | (vactive << 4))); cx18_av_write(cx, 0x476, vactive >> 4); cx18_av_write(cx, 0x477, vblank656); /* Sets src decimation rate */ cx18_av_write(cx, 0x478, 0xff & src_decimation); cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8)); /* Sets Luma and UV Low pass filters */ cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30)); /* Enables comb filters */ cx18_av_write(cx, 0x47b, comb); /* Sets SC Step*/ cx18_av_write(cx, 0x47c, sc); cx18_av_write(cx, 0x47d, 0xff & sc >> 8); cx18_av_write(cx, 0x47e, 0xff & sc >> 16); if (std & V4L2_STD_625_50) { state->slicer_line_delay = 1; state->slicer_line_offset = (6 + state->slicer_line_delay - 2); } else { state->slicer_line_delay = 0; state->slicer_line_offset = (10 + state->slicer_line_delay - 2); } cx18_av_write(cx, 0x47f, state->slicer_line_delay); } static void input_change(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; v4l2_std_id std = state->std; u8 v; /* Follow step 8c and 8d of section 3.16 in the cx18_av datasheet */ cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 
0x14 : 0x11); cx18_av_and_or(cx, 0x401, ~0x60, 0); cx18_av_and_or(cx, 0x401, ~0x60, 0x60); if (std & V4L2_STD_525_60) { if (std == V4L2_STD_NTSC_M_JP) { /* Japan uses EIAJ audio standard */ cx18_av_write_expect(cx, 0x808, 0xf7, 0xf7, 0xff); cx18_av_write_expect(cx, 0x80b, 0x02, 0x02, 0x3f); } else if (std == V4L2_STD_NTSC_M_KR) { /* South Korea uses A2 audio standard */ cx18_av_write_expect(cx, 0x808, 0xf8, 0xf8, 0xff); cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f); } else { /* Others use the BTSC audio standard */ cx18_av_write_expect(cx, 0x808, 0xf6, 0xf6, 0xff); cx18_av_write_expect(cx, 0x80b, 0x01, 0x01, 0x3f); } } else if (std & V4L2_STD_PAL) { /* Follow tuner change procedure for PAL */ cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff); cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f); } else if (std & V4L2_STD_SECAM) { /* Select autodetect for SECAM */ cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff); cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f); } v = cx18_av_read(cx, 0x803); if (v & 0x10) { /* restart audio decoder microcontroller */ v &= ~0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); v |= 0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); } } static int cx18_av_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) { struct cx18 *cx = v4l2_get_subdevdata(sd); input_change(cx); return 0; } static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input, enum cx18_av_audio_input aud_input) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; enum analog_signal_type { NONE, CVBS, Y, C, SIF, Pb, Pr } ch[3] = {NONE, NONE, NONE}; u8 afe_mux_cfg; u8 adc2_cfg; u8 input_mode; u32 afe_cfg; int i; CX18_DEBUG_INFO_DEV(sd, "decoder set video input %d, audio input %d\n", vid_input, aud_input); if (vid_input >= CX18_AV_COMPOSITE1 && vid_input <= CX18_AV_COMPOSITE8) { afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1); ch[0] = CVBS; input_mode = 0x0; } else if (vid_input >= CX18_AV_COMPONENT_LUMA1) { 
int luma = vid_input & 0xf000; int r_chroma = vid_input & 0xf0000; int b_chroma = vid_input & 0xf00000; if ((vid_input & ~0xfff000) || luma < CX18_AV_COMPONENT_LUMA1 || luma > CX18_AV_COMPONENT_LUMA8 || r_chroma < CX18_AV_COMPONENT_R_CHROMA4 || r_chroma > CX18_AV_COMPONENT_R_CHROMA6 || b_chroma < CX18_AV_COMPONENT_B_CHROMA7 || b_chroma > CX18_AV_COMPONENT_B_CHROMA8) { CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n", vid_input); return -EINVAL; } afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12; ch[0] = Y; afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12; ch[1] = Pr; afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14; ch[2] = Pb; input_mode = 0x6; } else { int luma = vid_input & 0xf0; int chroma = vid_input & 0xf00; if ((vid_input & ~0xff0) || luma < CX18_AV_SVIDEO_LUMA1 || luma > CX18_AV_SVIDEO_LUMA8 || chroma < CX18_AV_SVIDEO_CHROMA4 || chroma > CX18_AV_SVIDEO_CHROMA8) { CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n", vid_input); return -EINVAL; } afe_mux_cfg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4); ch[0] = Y; if (chroma >= CX18_AV_SVIDEO_CHROMA7) { afe_mux_cfg &= 0x3f; afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2; ch[2] = C; } else { afe_mux_cfg &= 0xcf; afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4; ch[1] = C; } input_mode = 0x2; } switch (aud_input) { case CX18_AV_AUDIO_SERIAL1: case CX18_AV_AUDIO_SERIAL2: /* do nothing, use serial audio input */ break; case CX18_AV_AUDIO4: afe_mux_cfg &= ~0x30; ch[1] = SIF; break; case CX18_AV_AUDIO5: afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x10; ch[1] = SIF; break; case CX18_AV_AUDIO6: afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x20; ch[1] = SIF; break; case CX18_AV_AUDIO7: afe_mux_cfg &= ~0xc0; ch[2] = SIF; break; case CX18_AV_AUDIO8: afe_mux_cfg = (afe_mux_cfg & ~0xc0) | 0x40; ch[2] = SIF; break; default: CX18_ERR_DEV(sd, "0x%04x is not a valid audio input!\n", aud_input); return -EINVAL; } /* Set up analog front end multiplexers */ cx18_av_write_expect(cx, 0x103, 
afe_mux_cfg, afe_mux_cfg, 0xf7); /* Set INPUT_MODE to Composite, S-Video, or Component */ cx18_av_and_or(cx, 0x401, ~0x6, input_mode); /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */ adc2_cfg = cx18_av_read(cx, 0x102); if (ch[2] == NONE) adc2_cfg &= ~0x2; /* No sig on CH3, set ADC2 to CH2 for input */ else adc2_cfg |= 0x2; /* Signal on CH3, set ADC2 to CH3 for input */ /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */ if (ch[1] != NONE && ch[2] != NONE) adc2_cfg |= 0x4; /* Set dual mode */ else adc2_cfg &= ~0x4; /* Clear dual mode */ cx18_av_write_expect(cx, 0x102, adc2_cfg, adc2_cfg, 0x17); /* Configure the analog front end */ afe_cfg = cx18_av_read4(cx, CXADEC_AFE_CTRL); afe_cfg &= 0xff000000; afe_cfg |= 0x00005000; /* CHROMA_IN, AUD_IN: ADC2; LUMA_IN: ADC1 */ if (ch[1] != NONE && ch[2] != NONE) afe_cfg |= 0x00000030; /* half_bw_ch[2-3] since in dual mode */ for (i = 0; i < 3; i++) { switch (ch[i]) { default: case NONE: /* CLAMP_SEL = Fixed to midcode clamp level */ afe_cfg |= (0x00000200 << i); break; case CVBS: case Y: if (i > 0) afe_cfg |= 0x00002000; /* LUMA_IN_SEL: ADC2 */ break; case C: case Pb: case Pr: /* CLAMP_SEL = Fixed to midcode clamp level */ afe_cfg |= (0x00000200 << i); if (i == 0 && ch[i] == C) afe_cfg &= ~0x00001000; /* CHROMA_IN_SEL ADC1 */ break; case SIF: /* * VGA_GAIN_SEL = Audio Decoder * CLAMP_SEL = Fixed to midcode clamp level */ afe_cfg |= (0x00000240 << i); if (i == 0) afe_cfg &= ~0x00004000; /* AUD_IN_SEL ADC1 */ break; } } cx18_av_write4(cx, CXADEC_AFE_CTRL, afe_cfg); state->vid_input = vid_input; state->aud_input = aud_input; cx18_av_audio_set_path(cx); input_change(cx); return 0; } static int cx18_av_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); return set_input(cx, input, state->aud_input); } static int cx18_av_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { 
struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); return set_input(cx, state->vid_input, input); } static int cx18_av_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u8 vpres; u8 mode; int val = 0; if (state->radio) return 0; vpres = cx18_av_read(cx, 0x40e) & 0x20; vt->signal = vpres ? 0xffff : 0x0; vt->capability |= V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP; mode = cx18_av_read(cx, 0x804); /* get rxsubchans and audmode */ if ((mode & 0xf) == 1) val |= V4L2_TUNER_SUB_STEREO; else val |= V4L2_TUNER_SUB_MONO; if (mode == 2 || mode == 4) val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; if (mode & 0x10) val |= V4L2_TUNER_SUB_SAP; vt->rxsubchans = val; vt->audmode = state->audmode; return 0; } static int cx18_av_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u8 v; if (state->radio) return 0; v = cx18_av_read(cx, 0x809); v &= ~0xf; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: /* mono -> mono stereo -> mono bilingual -> lang1 */ break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: /* mono -> mono stereo -> stereo bilingual -> lang1 */ v |= 0x4; break; case V4L2_TUNER_MODE_LANG1_LANG2: /* mono -> mono stereo -> stereo bilingual -> lang1/lang2 */ v |= 0x7; break; case V4L2_TUNER_MODE_LANG2: /* mono -> mono stereo -> stereo bilingual -> lang2 */ v |= 0x1; break; default: return -EINVAL; } cx18_av_write_expect(cx, 0x809, v, v, 0xff); state->audmode = vt->audmode; return 0; } static int cx18_av_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u8 fmt = 0; /* zero is autodetect */ u8 pal_m = 0; if (state->radio == 0 && state->std == norm) return 0; state->radio = 0; 
state->std = norm; /* First tests should be against specific std */ if (state->std == V4L2_STD_NTSC_M_JP) { fmt = 0x2; } else if (state->std == V4L2_STD_NTSC_443) { fmt = 0x3; } else if (state->std == V4L2_STD_PAL_M) { pal_m = 1; fmt = 0x5; } else if (state->std == V4L2_STD_PAL_N) { fmt = 0x6; } else if (state->std == V4L2_STD_PAL_Nc) { fmt = 0x7; } else if (state->std == V4L2_STD_PAL_60) { fmt = 0x8; } else { /* Then, test against generic ones */ if (state->std & V4L2_STD_NTSC) fmt = 0x1; else if (state->std & V4L2_STD_PAL) fmt = 0x4; else if (state->std & V4L2_STD_SECAM) fmt = 0xc; } CX18_DEBUG_INFO_DEV(sd, "changing video std to fmt %i\n", fmt); /* Follow step 9 of section 3.16 in the cx18_av datasheet. Without this PAL may display a vertical ghosting effect. This happens for example with the Yuan MPC622. */ if (fmt >= 4 && fmt < 8) { /* Set format to NTSC-M */ cx18_av_and_or(cx, 0x400, ~0xf, 1); /* Turn off LCOMB */ cx18_av_and_or(cx, 0x47b, ~6, 0); } cx18_av_and_or(cx, 0x400, ~0x2f, fmt | 0x20); cx18_av_and_or(cx, 0x403, ~0x3, pal_m); cx18_av_std_setup(cx); input_change(cx); return 0; } static int cx18_av_s_radio(struct v4l2_subdev *sd) { struct cx18_av_state *state = to_cx18_av_state(sd); state->radio = 1; return 0; } static int cx18_av_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct cx18 *cx = v4l2_get_subdevdata(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: cx18_av_write(cx, 0x414, ctrl->val - 128); break; case V4L2_CID_CONTRAST: cx18_av_write(cx, 0x415, ctrl->val << 1); break; case V4L2_CID_SATURATION: cx18_av_write(cx, 0x420, ctrl->val << 1); cx18_av_write(cx, 0x421, ctrl->val << 1); break; case V4L2_CID_HUE: cx18_av_write(cx, 0x422, ctrl->val); break; default: return -EINVAL; } return 0; } static int cx18_av_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); int HSC, VSC, Vsrc, Hsrc, filter, Vlines; int 
is_50Hz = !(state->std & V4L2_STD_525_60); if (fmt->code != V4L2_MBUS_FMT_FIXED) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; Vsrc = (cx18_av_read(cx, 0x476) & 0x3f) << 4; Vsrc |= (cx18_av_read(cx, 0x475) & 0xf0) >> 4; Hsrc = (cx18_av_read(cx, 0x472) & 0x3f) << 4; Hsrc |= (cx18_av_read(cx, 0x471) & 0xf0) >> 4; /* * This adjustment reflects the excess of vactive, set in * cx18_av_std_setup(), above standard values: * * 480 + 1 for 60 Hz systems * 576 + 3 for 50 Hz systems */ Vlines = fmt->height + (is_50Hz ? 3 : 1); /* * Invalid height and width scaling requests are: * 1. width less than 1/16 of the source width * 2. width greater than the source width * 3. height less than 1/8 of the source height * 4. height greater than the source height */ if ((fmt->width * 16 < Hsrc) || (Hsrc < fmt->width) || (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) { CX18_ERR_DEV(sd, "%dx%d is not a valid size!\n", fmt->width, fmt->height); return -ERANGE; } HSC = (Hsrc * (1 << 20)) / fmt->width - (1 << 20); VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9)); VSC &= 0x1fff; if (fmt->width >= 385) filter = 0; else if (fmt->width > 192) filter = 1; else if (fmt->width > 96) filter = 2; else filter = 3; CX18_DEBUG_INFO_DEV(sd, "decoder set size %dx%d -> scale %ux%u\n", fmt->width, fmt->height, HSC, VSC); /* HSCALE=HSC */ cx18_av_write(cx, 0x418, HSC & 0xff); cx18_av_write(cx, 0x419, (HSC >> 8) & 0xff); cx18_av_write(cx, 0x41a, HSC >> 16); /* VSCALE=VSC */ cx18_av_write(cx, 0x41c, VSC & 0xff); cx18_av_write(cx, 0x41d, VSC >> 8); /* VS_INTRLACE=1 VFILT=filter */ cx18_av_write(cx, 0x41e, 0x8 | filter); return 0; } static int cx18_av_s_stream(struct v4l2_subdev *sd, int enable) { struct cx18 *cx = v4l2_get_subdevdata(sd); CX18_DEBUG_INFO_DEV(sd, "%s output\n", enable ? 
"enable" : "disable"); if (enable) { cx18_av_write(cx, 0x115, 0x8c); cx18_av_write(cx, 0x116, 0x07); } else { cx18_av_write(cx, 0x115, 0x00); cx18_av_write(cx, 0x116, 0x00); } return 0; } static void log_video_status(struct cx18 *cx) { static const char *const fmt_strs[] = { "0x0", "NTSC-M", "NTSC-J", "NTSC-4.43", "PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60", "0x9", "0xA", "0xB", "SECAM", "0xD", "0xE", "0xF" }; struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; u8 vidfmt_sel = cx18_av_read(cx, 0x400) & 0xf; u8 gen_stat1 = cx18_av_read(cx, 0x40d); u8 gen_stat2 = cx18_av_read(cx, 0x40e); int vid_input = state->vid_input; CX18_INFO_DEV(sd, "Video signal: %spresent\n", (gen_stat2 & 0x20) ? "" : "not "); CX18_INFO_DEV(sd, "Detected format: %s\n", fmt_strs[gen_stat1 & 0xf]); CX18_INFO_DEV(sd, "Specified standard: %s\n", vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection"); if (vid_input >= CX18_AV_COMPOSITE1 && vid_input <= CX18_AV_COMPOSITE8) { CX18_INFO_DEV(sd, "Specified video input: Composite %d\n", vid_input - CX18_AV_COMPOSITE1 + 1); } else { CX18_INFO_DEV(sd, "Specified video input: " "S-Video (Luma In%d, Chroma In%d)\n", (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8); } CX18_INFO_DEV(sd, "Specified audioclock freq: %d Hz\n", state->audclk_freq); } static void log_audio_status(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; u8 download_ctl = cx18_av_read(cx, 0x803); u8 mod_det_stat0 = cx18_av_read(cx, 0x804); u8 mod_det_stat1 = cx18_av_read(cx, 0x805); u8 audio_config = cx18_av_read(cx, 0x808); u8 pref_mode = cx18_av_read(cx, 0x809); u8 afc0 = cx18_av_read(cx, 0x80b); u8 mute_ctl = cx18_av_read(cx, 0x8d3); int aud_input = state->aud_input; char *p; switch (mod_det_stat0) { case 0x00: p = "mono"; break; case 0x01: p = "stereo"; break; case 0x02: p = "dual"; break; case 0x04: p = "tri"; break; case 0x10: p = "mono with SAP"; break; case 0x11: p = "stereo with SAP"; 
break; case 0x12: p = "dual with SAP"; break; case 0x14: p = "tri with SAP"; break; case 0xfe: p = "forced mode"; break; default: p = "not defined"; break; } CX18_INFO_DEV(sd, "Detected audio mode: %s\n", p); switch (mod_det_stat1) { case 0x00: p = "not defined"; break; case 0x01: p = "EIAJ"; break; case 0x02: p = "A2-M"; break; case 0x03: p = "A2-BG"; break; case 0x04: p = "A2-DK1"; break; case 0x05: p = "A2-DK2"; break; case 0x06: p = "A2-DK3"; break; case 0x07: p = "A1 (6.0 MHz FM Mono)"; break; case 0x08: p = "AM-L"; break; case 0x09: p = "NICAM-BG"; break; case 0x0a: p = "NICAM-DK"; break; case 0x0b: p = "NICAM-I"; break; case 0x0c: p = "NICAM-L"; break; case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break; case 0x0e: p = "IF FM Radio"; break; case 0x0f: p = "BTSC"; break; case 0x10: p = "detected chrominance"; break; case 0xfd: p = "unknown audio standard"; break; case 0xfe: p = "forced audio standard"; break; case 0xff: p = "no detected audio standard"; break; default: p = "not defined"; break; } CX18_INFO_DEV(sd, "Detected audio standard: %s\n", p); CX18_INFO_DEV(sd, "Audio muted: %s\n", (mute_ctl & 0x2) ? "yes" : "no"); CX18_INFO_DEV(sd, "Audio microcontroller: %s\n", (download_ctl & 0x10) ? 
"running" : "stopped"); switch (audio_config >> 4) { case 0x00: p = "undefined"; break; case 0x01: p = "BTSC"; break; case 0x02: p = "EIAJ"; break; case 0x03: p = "A2-M"; break; case 0x04: p = "A2-BG"; break; case 0x05: p = "A2-DK1"; break; case 0x06: p = "A2-DK2"; break; case 0x07: p = "A2-DK3"; break; case 0x08: p = "A1 (6.0 MHz FM Mono)"; break; case 0x09: p = "AM-L"; break; case 0x0a: p = "NICAM-BG"; break; case 0x0b: p = "NICAM-DK"; break; case 0x0c: p = "NICAM-I"; break; case 0x0d: p = "NICAM-L"; break; case 0x0e: p = "FM radio"; break; case 0x0f: p = "automatic detection"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Configured audio standard: %s\n", p); if ((audio_config >> 4) < 0xF) { switch (audio_config & 0xF) { case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break; case 0x01: p = "MONO2 (LANGUAGE B)"; break; case 0x02: p = "MONO3 (STEREO forced MONO)"; break; case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break; case 0x04: p = "STEREO"; break; case 0x05: p = "DUAL1 (AC)"; break; case 0x06: p = "DUAL2 (BC)"; break; case 0x07: p = "DUAL3 (AB)"; break; default: p = "undefined"; } CX18_INFO_DEV(sd, "Configured audio mode: %s\n", p); } else { switch (audio_config & 0xF) { case 0x00: p = "BG"; break; case 0x01: p = "DK1"; break; case 0x02: p = "DK2"; break; case 0x03: p = "DK3"; break; case 0x04: p = "I"; break; case 0x05: p = "L"; break; case 0x06: p = "BTSC"; break; case 0x07: p = "EIAJ"; break; case 0x08: p = "A2-M"; break; case 0x09: p = "FM Radio (4.5 MHz)"; break; case 0x0a: p = "FM Radio (5.5 MHz)"; break; case 0x0b: p = "S-Video"; break; case 0x0f: p = "automatic standard and mode detection"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Configured audio system: %s\n", p); } if (aud_input) CX18_INFO_DEV(sd, "Specified audio input: Tuner (In%d)\n", aud_input); else CX18_INFO_DEV(sd, "Specified audio input: External\n"); switch (pref_mode & 0xf) { case 0: p = "mono/language A"; 
break; case 1: p = "language B"; break; case 2: p = "language C"; break; case 3: p = "analog fallback"; break; case 4: p = "stereo"; break; case 5: p = "language AC"; break; case 6: p = "language BC"; break; case 7: p = "language AB"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Preferred audio mode: %s\n", p); if ((audio_config & 0xf) == 0xf) { switch ((afc0 >> 3) & 0x1) { case 0: p = "system DK"; break; case 1: p = "system L"; break; } CX18_INFO_DEV(sd, "Selected 65 MHz format: %s\n", p); switch (afc0 & 0x7) { case 0: p = "Chroma"; break; case 1: p = "BTSC"; break; case 2: p = "EIAJ"; break; case 3: p = "A2-M"; break; case 4: p = "autodetect"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Selected 45 MHz format: %s\n", p); } } static int cx18_av_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); log_video_status(cx); log_audio_status(cx); return 0; } static inline int cx18_av_dbg_match(const struct v4l2_dbg_match *match) { return match->type == V4L2_CHIP_MATCH_HOST && match->addr == 1; } static int cx18_av_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct cx18_av_state *state = to_cx18_av_state(sd); if (cx18_av_dbg_match(&chip->match)) { chip->ident = state->id; chip->revision = state->rev; } return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int cx18_av_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct cx18 *cx = v4l2_get_subdevdata(sd); if (!cx18_av_dbg_match(&reg->match)) return -EINVAL; if ((reg->reg & 0x3) != 0) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->size = 4; reg->val = cx18_av_read4(cx, reg->reg & 0x00000ffc); return 0; } static int cx18_av_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct cx18 *cx = v4l2_get_subdevdata(sd); if (!cx18_av_dbg_match(&reg->match)) return -EINVAL; if ((reg->reg & 0x3) != 0) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; cx18_av_write4(cx, reg->reg & 
0x00000ffc, reg->val); return 0; } #endif static const struct v4l2_ctrl_ops cx18_av_ctrl_ops = { .s_ctrl = cx18_av_s_ctrl, }; static const struct v4l2_subdev_core_ops cx18_av_general_ops = { .g_chip_ident = cx18_av_g_chip_ident, .log_status = cx18_av_log_status, .load_fw = cx18_av_load_fw, .reset = cx18_av_reset, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .s_std = cx18_av_s_std, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = cx18_av_g_register, .s_register = cx18_av_s_register, #endif }; static const struct v4l2_subdev_tuner_ops cx18_av_tuner_ops = { .s_radio = cx18_av_s_radio, .s_frequency = cx18_av_s_frequency, .g_tuner = cx18_av_g_tuner, .s_tuner = cx18_av_s_tuner, }; static const struct v4l2_subdev_audio_ops cx18_av_audio_ops = { .s_clock_freq = cx18_av_s_clock_freq, .s_routing = cx18_av_s_audio_routing, }; static const struct v4l2_subdev_video_ops cx18_av_video_ops = { .s_routing = cx18_av_s_video_routing, .s_stream = cx18_av_s_stream, .s_mbus_fmt = cx18_av_s_mbus_fmt, }; static const struct v4l2_subdev_vbi_ops cx18_av_vbi_ops = { .decode_vbi_line = cx18_av_decode_vbi_line, .g_sliced_fmt = cx18_av_g_sliced_fmt, .s_sliced_fmt = cx18_av_s_sliced_fmt, .s_raw_fmt = cx18_av_s_raw_fmt, }; static const struct v4l2_subdev_ops cx18_av_ops = { .core = &cx18_av_general_ops, .tuner = &cx18_av_tuner_ops, .audio = &cx18_av_audio_ops, .video = &cx18_av_video_ops, .vbi = &cx18_av_vbi_ops, }; int cx18_av_probe(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd; int err; state->rev = cx18_av_read4(cx, CXADEC_CHIP_CTRL) & 0xffff; state->id = ((state->rev >> 4) == CXADEC_CHIP_TYPE_MAKO) ? 
V4L2_IDENT_CX23418_843 : V4L2_IDENT_UNKNOWN; state->vid_input = CX18_AV_COMPOSITE7; state->aud_input = CX18_AV_AUDIO8; state->audclk_freq = 48000; state->audmode = V4L2_TUNER_MODE_LANG1; state->slicer_line_delay = 0; state->slicer_line_offset = (10 + state->slicer_line_delay - 2); sd = &state->sd; v4l2_subdev_init(sd, &cx18_av_ops); v4l2_set_subdevdata(sd, cx); snprintf(sd->name, sizeof(sd->name), "%s %03x", cx->v4l2_dev.name, (state->rev >> 4)); sd->grp_id = CX18_HW_418_AV; v4l2_ctrl_handler_init(&state->hdl, 9); v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops, V4L2_CID_CONTRAST, 0, 127, 1, 64); v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops, V4L2_CID_SATURATION, 0, 127, 1, 64); v4l2_ctrl_new_std(&state->hdl, &cx18_av_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); state->volume = v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 0); v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_BASS, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(&state->hdl, &cx18_av_audio_ctrl_ops, V4L2_CID_AUDIO_TREBLE, 0, 65535, 65535 / 100, 32768); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { int err = state->hdl.error; v4l2_ctrl_handler_free(&state->hdl); return err; } err = v4l2_device_register_subdev(&cx->v4l2_dev, sd); if (err) v4l2_ctrl_handler_free(&state->hdl); else cx18_av_init(cx); return err; }
gpl-2.0
casinobrawl/dt2w_z2
arch/x86/power/hibernate_64.c
8340
4177
/* * Hibernation support for x86-64 * * Distribute under GPLv2 * * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz> * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> */ #include <linux/gfp.h> #include <linux/smp.h> #include <linux/suspend.h> #include <asm/proto.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mtrr.h> #include <asm/suspend.h> /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; /* Defined in hibernate_asm_64.S */ extern int restore_image(void); /* * Address to jump to in the last phase of restore in order to get to the image * kernel's text (this value is passed in the image header). */ unsigned long restore_jump_address; /* * Value of the cr3 register from before the hibernation (this value is passed * in the image header). */ unsigned long restore_cr3; pgd_t *temp_level4_pgt; void *relocated_restore_code; static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) { long i, j; i = pud_index(address); pud = pud + i; for (; i < PTRS_PER_PUD; pud++, i++) { unsigned long paddr; pmd_t *pmd; paddr = address + i*PUD_SIZE; if (paddr >= end) break; pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); if (!pmd) return -ENOMEM; set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { unsigned long pe; if (paddr >= end) break; pe = __PAGE_KERNEL_LARGE_EXEC | paddr; pe &= __supported_pte_mask; set_pmd(pmd, __pmd(pe)); } } return 0; } static int set_up_temporary_mappings(void) { unsigned long start, end, next; int error; temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!temp_level4_pgt) return -ENOMEM; /* It is safe to reuse the original kernel mapping */ set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), init_level4_pgt[pgd_index(__START_KERNEL_map)]); /* Set up the direct mapping from scratch */ start = (unsigned long)pfn_to_kaddr(0); end = (unsigned long)pfn_to_kaddr(max_pfn); 
for (; start < end; start = next) { pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); if (!pud) return -ENOMEM; next = start + PGDIR_SIZE; if (next > end) next = end; if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) return error; set_pgd(temp_level4_pgt + pgd_index(start), mk_kernel_pgd(__pa(pud))); } return 0; } int swsusp_arch_resume(void) { int error; /* We have got enough memory and from now on we cannot recover */ if ((error = set_up_temporary_mappings())) return error; relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); if (!relocated_restore_code) return -ENOMEM; memcpy(relocated_restore_code, &core_restore_code, &restore_registers - &core_restore_code); restore_image(); return 0; } /* * pfn_is_nosave - check if given pfn is in the 'nosave' section */ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); } struct restore_data_record { unsigned long jump_address; unsigned long cr3; unsigned long magic; }; #define RESTORE_MAGIC 0x0123456789ABCDEFUL /** * arch_hibernation_header_save - populate the architecture specific part * of a hibernation image header * @addr: address to save the data at */ int arch_hibernation_header_save(void *addr, unsigned int max_size) { struct restore_data_record *rdr = addr; if (max_size < sizeof(struct restore_data_record)) return -EOVERFLOW; rdr->jump_address = restore_jump_address; rdr->cr3 = restore_cr3; rdr->magic = RESTORE_MAGIC; return 0; } /** * arch_hibernation_header_restore - read the architecture specific data * from the hibernation image header * @addr: address to read the data from */ int arch_hibernation_header_restore(void *addr) { struct restore_data_record *rdr = addr; restore_jump_address = rdr->jump_address; restore_cr3 = rdr->cr3; return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; }
gpl-2.0
lehmanju/kernel_lenovo_lifetab_e10312
arch/x86/power/hibernate_64.c
8340
4177
/* * Hibernation support for x86-64 * * Distribute under GPLv2 * * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz> * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> */ #include <linux/gfp.h> #include <linux/smp.h> #include <linux/suspend.h> #include <asm/proto.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mtrr.h> #include <asm/suspend.h> /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; /* Defined in hibernate_asm_64.S */ extern int restore_image(void); /* * Address to jump to in the last phase of restore in order to get to the image * kernel's text (this value is passed in the image header). */ unsigned long restore_jump_address; /* * Value of the cr3 register from before the hibernation (this value is passed * in the image header). */ unsigned long restore_cr3; pgd_t *temp_level4_pgt; void *relocated_restore_code; static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) { long i, j; i = pud_index(address); pud = pud + i; for (; i < PTRS_PER_PUD; pud++, i++) { unsigned long paddr; pmd_t *pmd; paddr = address + i*PUD_SIZE; if (paddr >= end) break; pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); if (!pmd) return -ENOMEM; set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { unsigned long pe; if (paddr >= end) break; pe = __PAGE_KERNEL_LARGE_EXEC | paddr; pe &= __supported_pte_mask; set_pmd(pmd, __pmd(pe)); } } return 0; } static int set_up_temporary_mappings(void) { unsigned long start, end, next; int error; temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!temp_level4_pgt) return -ENOMEM; /* It is safe to reuse the original kernel mapping */ set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), init_level4_pgt[pgd_index(__START_KERNEL_map)]); /* Set up the direct mapping from scratch */ start = (unsigned long)pfn_to_kaddr(0); end = (unsigned long)pfn_to_kaddr(max_pfn); 
for (; start < end; start = next) { pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); if (!pud) return -ENOMEM; next = start + PGDIR_SIZE; if (next > end) next = end; if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) return error; set_pgd(temp_level4_pgt + pgd_index(start), mk_kernel_pgd(__pa(pud))); } return 0; } int swsusp_arch_resume(void) { int error; /* We have got enough memory and from now on we cannot recover */ if ((error = set_up_temporary_mappings())) return error; relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); if (!relocated_restore_code) return -ENOMEM; memcpy(relocated_restore_code, &core_restore_code, &restore_registers - &core_restore_code); restore_image(); return 0; } /* * pfn_is_nosave - check if given pfn is in the 'nosave' section */ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); } struct restore_data_record { unsigned long jump_address; unsigned long cr3; unsigned long magic; }; #define RESTORE_MAGIC 0x0123456789ABCDEFUL /** * arch_hibernation_header_save - populate the architecture specific part * of a hibernation image header * @addr: address to save the data at */ int arch_hibernation_header_save(void *addr, unsigned int max_size) { struct restore_data_record *rdr = addr; if (max_size < sizeof(struct restore_data_record)) return -EOVERFLOW; rdr->jump_address = restore_jump_address; rdr->cr3 = restore_cr3; rdr->magic = RESTORE_MAGIC; return 0; } /** * arch_hibernation_header_restore - read the architecture specific data * from the hibernation image header * @addr: address to read the data from */ int arch_hibernation_header_restore(void *addr) { struct restore_data_record *rdr = addr; restore_jump_address = rdr->jump_address; restore_cr3 = rdr->cr3; return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; }
gpl-2.0
Renzo-Olivares/android_kernel_htc_vigor
arch/powerpc/boot/mpc8xx.c
13972
1692
/* * MPC8xx support functions * * Author: Scott Wood <scottwood@freescale.com> * * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "types.h" #include "fsl-soc.h" #include "mpc8xx.h" #include "stdio.h" #include "io.h" #define MPC8XX_PLPRCR (0x284/4) /* PLL and Reset Control Register */ /* Return system clock from crystal frequency */ u32 mpc885_get_clock(u32 crystal) { u32 *immr; u32 plprcr; int mfi, mfn, mfd, pdf, div; u32 ret; immr = fsl_get_immr(); if (!immr) { printf("mpc885_get_clock: Couldn't get IMMR base.\r\n"); return 0; } plprcr = in_be32(&immr[MPC8XX_PLPRCR]); mfi = (plprcr >> 16) & 15; if (mfi < 5) { printf("Warning: PLPRCR[MFI] value of %d out-of-bounds\r\n", mfi); mfi = 5; } pdf = (plprcr >> 1) & 0xf; div = (plprcr >> 20) & 3; mfd = (plprcr >> 22) & 0x1f; mfn = (plprcr >> 27) & 0x1f; ret = crystal * mfi; if (mfn != 0) ret += crystal * mfn / (mfd + 1); return ret / (pdf + 1); } /* Set common device tree fields based on the given clock frequencies. */ void mpc8xx_set_clocks(u32 sysclk) { void *node; dt_fixup_cpu_clocks(sysclk, sysclk / 16, sysclk); node = finddevice("/soc/cpm"); if (node) setprop(node, "clock-frequency", &sysclk, 4); node = finddevice("/soc/cpm/brg"); if (node) setprop(node, "clock-frequency", &sysclk, 4); } int mpc885_fixup_clocks(u32 crystal) { u32 sysclk = mpc885_get_clock(crystal); if (!sysclk) return 0; mpc8xx_set_clocks(sysclk); return 1; }
gpl-2.0
invisiblek/caf_kernel_msm
drivers/media/video/gspca/gl860/gl860-ov2640.c
14228
18105
/* Subdriver for the GL860 chip with the OV2640 sensor * Author Olivier LORIN, from Malmostoso's logs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* Sensor : OV2640 */ #include "gl860.h" static u8 dat_init1[] = "\x00\x41\x07\x6a\x06\x61\x0d\x6a" "\x10\x10\xc1\x01"; static u8 c61[] = {0x61}; /* expected */ static u8 c51[] = {0x51}; /* expected */ static u8 c50[] = {0x50}; /* expected */ static u8 c28[] = {0x28}; /* expected */ static u8 ca8[] = {0xa8}; /* expected */ static u8 dat_post[] = "\x00\x41\x07\x6a\x06\xef\x0d\x6a" "\x10\x10\xc1\x01"; static u8 dat_640[] = "\xd0\x01\xd1\x08\xd2\xe0\xd3\x02\xd4\x10\xd5\x81"; static u8 dat_800[] = "\xd0\x01\xd1\x10\xd2\x58\xd3\x02\xd4\x18\xd5\x21"; static u8 dat_1280[] = "\xd0\x01\xd1\x18\xd2\xc0\xd3\x02\xd4\x28\xd5\x01"; static u8 dat_1600[] = "\xd0\x01\xd1\x20\xd2\xb0\xd3\x02\xd4\x30\xd5\x41"; static struct validx tbl_init_at_startup[] = { {0x0000, 0x0000}, {0x0010, 0x0010}, {0x0008, 0x00c0}, {0x0001, 0x00c1}, {0x0001, 0x00c2}, {0x0020, 0x0006}, {0x006a, 0x000d}, {0x0050, 0x0000}, {0x0041, 0x0000}, {0x006a, 0x0007}, {0x0061, 0x0006}, {0x006a, 0x000d}, {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0001, 0x00c1}, {0x0041, 0x00c2}, {0x0004, 0x00d8}, {0x0012, 0x0004}, {0x0000, 0x0058}, {0x0041, 0x0000}, {0x0061, 0x0000}, }; static struct validx tbl_common[] = { {0x6000, 0x00ff}, {0x60ff, 0x002c}, {0x60df, 0x002e}, {0x6001, 0x00ff}, {0x6080, 0x0012}, {0x6000, 0x0000}, 
{0x6000, 0x0045}, {0x6000, 0x0010}, {0x6035, 0x003c}, {0x6000, 0x0011}, {0x6028, 0x0004}, {0x60e5, 0x0013}, {0x6088, 0x0014}, {0x600c, 0x002c}, {0x6078, 0x0033}, {0x60f7, 0x003b}, {0x6000, 0x003e}, {0x6011, 0x0043}, {0x6010, 0x0016}, {0x6082, 0x0039}, {0x6088, 0x0035}, {0x600a, 0x0022}, {0x6040, 0x0037}, {0x6000, 0x0023}, {0x60a0, 0x0034}, {0x601a, 0x0036}, {0x6002, 0x0006}, {0x60c0, 0x0007}, {0x60b7, 0x000d}, {0x6001, 0x000e}, {0x6000, 0x004c}, {0x6081, 0x004a}, {0x6099, 0x0021}, {0x6002, 0x0009}, {0x603e, 0x0024}, {0x6034, 0x0025}, {0x6081, 0x0026}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, {0x6000, 0x005c}, {0x6000, 0x0063}, {0x6000, 0x007c}, {0x6070, 0x0061}, {0x6080, 0x0062}, {0x6080, 0x0020}, {0x6030, 0x0028}, {0x6000, 0x006c}, {0x6000, 0x006e}, {0x6002, 0x0070}, {0x6094, 0x0071}, {0x60c1, 0x0073}, {0x6034, 0x003d}, {0x6057, 0x005a}, {0x60bb, 0x004f}, {0x609c, 0x0050}, {0x6080, 0x006d}, {0x6002, 0x0039}, {0x6033, 0x003a}, {0x60f1, 0x003b}, {0x6031, 0x003c}, {0x6000, 0x00ff}, {0x6014, 0x00e0}, {0x60ff, 0x0076}, {0x60a0, 0x0033}, {0x6020, 0x0042}, {0x6018, 0x0043}, {0x6000, 0x004c}, {0x60d0, 0x0087}, {0x600f, 0x0088}, {0x6003, 0x00d7}, {0x6010, 0x00d9}, {0x6005, 0x00da}, {0x6082, 0x00d3}, {0x60c0, 0x00f9}, {0x6006, 0x0044}, {0x6007, 0x00d1}, {0x6002, 0x00d2}, {0x6000, 0x00d2}, {0x6011, 0x00d8}, {0x6008, 0x00c8}, {0x6080, 0x00c9}, {0x6008, 0x007c}, {0x6020, 0x007d}, {0x6020, 0x007d}, {0x6000, 0x0090}, {0x600e, 0x0091}, {0x601a, 0x0091}, {0x6031, 0x0091}, {0x605a, 0x0091}, {0x6069, 0x0091}, {0x6075, 0x0091}, {0x607e, 0x0091}, {0x6088, 0x0091}, {0x608f, 0x0091}, {0x6096, 0x0091}, {0x60a3, 0x0091}, {0x60af, 0x0091}, {0x60c4, 0x0091}, {0x60d7, 0x0091}, {0x60e8, 0x0091}, {0x6020, 0x0091}, {0x6000, 0x0092}, {0x6006, 0x0093}, {0x60e3, 0x0093}, {0x6005, 0x0093}, {0x6005, 0x0093}, {0x6000, 0x0093}, {0x6004, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0096}, 
{0x6008, 0x0097}, {0x6019, 0x0097}, {0x6002, 0x0097}, {0x600c, 0x0097}, {0x6024, 0x0097}, {0x6030, 0x0097}, {0x6028, 0x0097}, {0x6026, 0x0097}, {0x6002, 0x0097}, {0x6098, 0x0097}, {0x6080, 0x0097}, {0x6000, 0x0097}, {0x6000, 0x0097}, {0x60ed, 0x00c3}, {0x609a, 0x00c4}, {0x6000, 0x00a4}, {0x6011, 0x00c5}, {0x6051, 0x00c6}, {0x6010, 0x00c7}, {0x6066, 0x00b6}, {0x60a5, 0x00b8}, {0x6064, 0x00b7}, {0x607c, 0x00b9}, {0x60af, 0x00b3}, {0x6097, 0x00b4}, {0x60ff, 0x00b5}, {0x60c5, 0x00b0}, {0x6094, 0x00b1}, {0x600f, 0x00b2}, {0x605c, 0x00c4}, {0x6000, 0x00a8}, {0x60c8, 0x00c0}, {0x6096, 0x00c1}, {0x601d, 0x0086}, {0x6000, 0x0050}, {0x6090, 0x0051}, {0x6018, 0x0052}, {0x6000, 0x0053}, {0x6000, 0x0054}, {0x6088, 0x0055}, {0x6000, 0x0057}, {0x6090, 0x005a}, {0x6018, 0x005b}, {0x6005, 0x005c}, {0x60ed, 0x00c3}, {0x6000, 0x007f}, {0x6005, 0x00da}, {0x601f, 0x00e5}, {0x6067, 0x00e1}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, {0x6000, 0x0005}, {0x6001, 0x00ff}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, }; static struct validx tbl_sensor_settings_common1[] = { {0x0041, 0x0000}, {0x006a, 0x0007}, {0x00ef, 0x0006}, {0x006a, 0x000d}, {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0001, 0x00c1}, {0x0041, 0x00c2}, {0x0004, 0x00d8}, {0x0012, 0x0004}, {0x0000, 0x0058}, {0x0041, 0x0000}, {50, 0xffff}, {0x0061, 0x0000}, {0xffff, 0xffff}, {0x6000, 0x00ff}, {0x6000, 0x007c}, {0x6007, 0x007d}, {30, 0xffff}, {0x0040, 0x0000}, }; static struct validx tbl_sensor_settings_common2[] = { {0x6001, 0x00ff}, {0x6038, 0x000c}, {10, 0xffff}, {0x6000, 0x0011}, }; static struct validx tbl_640[] = { {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6067, 0x00e1}, {0x6004, 0x00da}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6001, 0x00ff}, {0x6000, 0x0012}, {0x6000, 0x0011}, {0x6011, 0x0017}, {0x6075, 0x0018}, {0x6001, 0x0019}, {0x6097, 0x001a}, {0x6036, 0x0032}, {0x60bb, 0x004f}, {0x6057, 0x005a}, {0x609c, 0x0050}, {0x6080, 0x006d}, {0x6092, 0x0026}, {0x60ff, 0x0020}, {0x6000, 0x0027}, 
{0x6000, 0x00ff}, {0x60c8, 0x00c0}, {0x6096, 0x00c1}, {0x6000, 0x008c}, {0x603d, 0x0086}, {0x6089, 0x0050}, {0x6090, 0x0051}, {0x602c, 0x0052}, {0x6000, 0x0053}, {0x6000, 0x0054}, {0x6088, 0x0055}, {0x6000, 0x0057}, {0x60a0, 0x005a}, {0x6078, 0x005b}, {0x6000, 0x005c}, {0x6004, 0x00d3}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, {0x60a1, 0x005a}, }; static struct validx tbl_800[] = { {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6067, 0x00e1}, {0x6004, 0x00da}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6001, 0x00ff}, {0x6040, 0x0012}, {0x6000, 0x0011}, {0x6011, 0x0017}, {0x6043, 0x0018}, {0x6000, 0x0019}, {0x604b, 0x001a}, {0x6009, 0x0032}, {0x60ca, 0x004f}, {0x60a8, 0x0050}, {0x6000, 0x006d}, {0x6038, 0x003d}, {0x60c8, 0x0035}, {0x6000, 0x0022}, {0x6092, 0x0026}, {0x60ff, 0x0020}, {0x6000, 0x0027}, {0x6000, 0x00ff}, {0x6064, 0x00c0}, {0x604b, 0x00c1}, {0x6000, 0x008c}, {0x601d, 0x0086}, {0x6082, 0x00d3}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, {0x6020, 0x008c}, {0x6001, 0x00ff}, {0x6044, 0x0018}, }; static struct validx tbl_big1[] = { {0x0002, 0x00c1}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6001, 0x00ff}, {0x6000, 0x0012}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, {0x6000, 0x0011}, {0x6011, 0x0017}, {0x6075, 0x0018}, {0x6001, 0x0019}, {0x6097, 0x001a}, {0x6036, 0x0032}, {0x60bb, 0x004f}, {0x609c, 0x0050}, {0x6057, 0x005a}, {0x6080, 0x006d}, {0x6043, 0x000f}, {0x608f, 0x0003}, {0x6005, 0x007c}, {0x6081, 0x0026}, {0x6000, 0x00ff}, {0x60c8, 0x00c0}, {0x6096, 0x00c1}, {0x6000, 0x008c}, }; static struct validx tbl_big2[] = { {0x603d, 0x0086}, {0x6000, 0x0050}, {0x6090, 0x0051}, {0x602c, 0x0052}, {0x6000, 0x0053}, {0x6000, 0x0054}, {0x6088, 0x0055}, {0x6000, 0x0057}, {0x6040, 0x005a}, {0x60f0, 0x005b}, {0x6001, 0x005c}, {0x6082, 0x00d3}, {0x6000, 0x008e}, }; static struct validx tbl_big3[] = { {0x6004, 0x00da}, {0x6000, 0x00e0}, {0x6067, 0x00e1}, {0x60ff, 0x00dd}, {0x6001, 0x00ff}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 
0x00e0}, {0x6001, 0x00ff}, {0x6000, 0x0011}, {0x6000, 0x00ff}, {0x6010, 0x00c7}, {0x6000, 0x0092}, {0x6006, 0x0093}, {0x60e3, 0x0093}, {0x6005, 0x0093}, {0x6005, 0x0093}, {0x60ed, 0x00c3}, {0x6000, 0x00a4}, {0x60d0, 0x0087}, {0x6003, 0x0096}, {0x600c, 0x0097}, {0x6024, 0x0097}, {0x6030, 0x0097}, {0x6028, 0x0097}, {0x6026, 0x0097}, {0x6002, 0x0097}, {0x6001, 0x00ff}, {0x6043, 0x000f}, {0x608f, 0x0003}, {0x6000, 0x002d}, {0x6000, 0x002e}, {0x600a, 0x0022}, {0x6002, 0x0070}, {0x6008, 0x0014}, {0x6048, 0x0014}, {0x6000, 0x00ff}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, }; static struct validx tbl_post_unset_alt[] = { {0x006a, 0x000d}, {0x6001, 0x00ff}, {0x6081, 0x0026}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, {0x6068, 0x000d}, {50, 0xffff}, {0x0021, 0x0000}, }; static int ov2640_init_at_startup(struct gspca_dev *gspca_dev); static int ov2640_configure_alt(struct gspca_dev *gspca_dev); static int ov2640_init_pre_alt(struct gspca_dev *gspca_dev); static int ov2640_init_post_alt(struct gspca_dev *gspca_dev); static void ov2640_post_unset_alt(struct gspca_dev *gspca_dev); static int ov2640_camera_settings(struct gspca_dev *gspca_dev); /*==========================================================================*/ void ov2640_init_settings(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->vcur.backlight = 32; sd->vcur.brightness = 0; sd->vcur.sharpness = 6; sd->vcur.contrast = 0; sd->vcur.gamma = 32; sd->vcur.hue = 0; sd->vcur.saturation = 128; sd->vcur.whitebal = 64; sd->vcur.mirror = 0; sd->vcur.flip = 0; sd->vmax.backlight = 64; sd->vmax.brightness = 255; sd->vmax.sharpness = 31; sd->vmax.contrast = 255; sd->vmax.gamma = 64; sd->vmax.hue = 254 + 2; sd->vmax.saturation = 255; sd->vmax.whitebal = 128; sd->vmax.mirror = 1; sd->vmax.flip = 1; sd->vmax.AC50Hz = 0; sd->dev_camera_settings = ov2640_camera_settings; sd->dev_init_at_startup = ov2640_init_at_startup; sd->dev_configure_alt = ov2640_configure_alt; sd->dev_init_pre_alt = 
ov2640_init_pre_alt; sd->dev_post_unset_alt = ov2640_post_unset_alt; } /*==========================================================================*/ static void common(struct gspca_dev *gspca_dev) { fetch_validx(gspca_dev, tbl_common, ARRAY_SIZE(tbl_common)); } static int ov2640_init_at_startup(struct gspca_dev *gspca_dev) { fetch_validx(gspca_dev, tbl_init_at_startup, ARRAY_SIZE(tbl_init_at_startup)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_init1); common(gspca_dev); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0006, 1, c61); ctrl_out(gspca_dev, 0x40, 1, 0x00ef, 0x0006, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, c51); ctrl_out(gspca_dev, 0x40, 1, 0x0051, 0x0000, 0, NULL); /* ctrl_out(gspca_dev, 0x40, 11, 0x0000, 0x0000, 0, NULL); */ return 0; } static int ov2640_init_pre_alt(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->mirrorMask = 0; sd->vold.backlight = -1; sd->vold.brightness = -1; sd->vold.sharpness = -1; sd->vold.contrast = -1; sd->vold.saturation = -1; sd->vold.gamma = -1; sd->vold.hue = -1; sd->vold.whitebal = -1; sd->vold.mirror = -1; sd->vold.flip = -1; ov2640_init_post_alt(gspca_dev); return 0; } static int ov2640_init_post_alt(struct gspca_dev *gspca_dev) { s32 reso = gspca_dev->cam.cam_mode[(s32) gspca_dev->curr_mode].priv; s32 n; /* reserved for FETCH functions */ ctrl_out(gspca_dev, 0x40, 5, 0x0001, 0x0000, 0, NULL); n = fetch_validx(gspca_dev, tbl_sensor_settings_common1, ARRAY_SIZE(tbl_sensor_settings_common1)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_post); common(gspca_dev); keep_on_fetching_validx(gspca_dev, tbl_sensor_settings_common1, ARRAY_SIZE(tbl_sensor_settings_common1), n); switch (reso) { case IMAGE_640: n = fetch_validx(gspca_dev, tbl_640, ARRAY_SIZE(tbl_640)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_640); break; case IMAGE_800: n = fetch_validx(gspca_dev, tbl_800, ARRAY_SIZE(tbl_800)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_800); break; case 
IMAGE_1600: case IMAGE_1280: n = fetch_validx(gspca_dev, tbl_big1, ARRAY_SIZE(tbl_big1)); if (reso == IMAGE_1280) { n = fetch_validx(gspca_dev, tbl_big2, ARRAY_SIZE(tbl_big2)); } else { ctrl_out(gspca_dev, 0x40, 1, 0x601d, 0x0086, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00d7, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6082, 0x00d3, 0, NULL); } n = fetch_validx(gspca_dev, tbl_big3, ARRAY_SIZE(tbl_big3)); if (reso == IMAGE_1280) { ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_1280); } else { ctrl_out(gspca_dev, 0x40, 1, 0x6020, 0x008c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6076, 0x0018, 0, NULL); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_1600); } break; } n = fetch_validx(gspca_dev, tbl_sensor_settings_common2, ARRAY_SIZE(tbl_sensor_settings_common2)); ov2640_camera_settings(gspca_dev); return 0; } static int ov2640_configure_alt(struct gspca_dev *gspca_dev) { s32 reso = gspca_dev->cam.cam_mode[(s32) gspca_dev->curr_mode].priv; switch (reso) { case IMAGE_640: gspca_dev->alt = 3 + 1; break; case IMAGE_800: case IMAGE_1280: case IMAGE_1600: gspca_dev->alt = 1 + 1; break; } return 0; } static int ov2640_camera_settings(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; s32 backlight = sd->vcur.backlight; s32 bright = sd->vcur.brightness; s32 sharp = sd->vcur.sharpness; s32 gam = sd->vcur.gamma; s32 cntr = sd->vcur.contrast; s32 sat = sd->vcur.saturation; s32 hue = sd->vcur.hue; s32 wbal = sd->vcur.whitebal; s32 mirror = (((sd->vcur.mirror > 0) ^ sd->mirrorMask) == 0); s32 flip = (((sd->vcur.flip > 0) ^ sd->mirrorMask) == 0); if (backlight != sd->vold.backlight) { /* No sd->vold.backlight=backlight; (to be done again later) */ if (backlight < 0 || backlight > sd->vmax.backlight) backlight = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight , 0x0024, 0, NULL); 
ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight - 10, 0x0025, 0, NULL); } if (bright != sd->vold.brightness) { sd->vold.brightness = bright; if (bright < 0 || bright > sd->vmax.brightness) bright = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6009 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + bright, 0x007d, 0, NULL); } if (wbal != sd->vold.whitebal) { sd->vold.whitebal = wbal; if (wbal < 0 || wbal > sd->vmax.whitebal) wbal = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6003 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + wbal, 0x007d, 0, NULL); } if (cntr != sd->vold.contrast) { sd->vold.contrast = cntr; if (cntr < 0 || cntr > sd->vmax.contrast) cntr = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6007 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + cntr, 0x007d, 0, NULL); } if (sat != sd->vold.saturation) { sd->vold.saturation = sat; if (sat < 0 || sat > sd->vmax.saturation) sat = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + sat, 0x007d, 0, NULL); } if (sharp != sd->vold.sharpness) { sd->vold.sharpness = sharp; if (sharp < 0 || sharp > sd->vmax.sharpness) sharp = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x0092, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x60c0 + sharp, 0x0093, 0, NULL); } if (hue != sd->vold.hue) { sd->vold.hue = hue; if (hue < 0 || hue > sd->vmax.hue) hue = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6002 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + hue * (hue < 255), 0x007d, 0, NULL); if (hue >= 255) sd->swapRB = 1; else sd->swapRB = 0; } if (gam != sd->vold.gamma) { sd->vold.gamma = gam; if (gam < 0 || gam > sd->vmax.gamma) gam = 0; ctrl_out(gspca_dev, 0x40, 1, 
0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6008 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + gam, 0x007d, 0, NULL); } if (mirror != sd->vold.mirror || flip != sd->vold.flip) { sd->vold.mirror = mirror; sd->vold.flip = flip; mirror = 0x80 * mirror; ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x8004, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x8004, 1, c28); ctrl_out(gspca_dev, 0x40, 1, 0x6028 + mirror, 0x0004, 0, NULL); flip = 0x50 * flip + mirror; ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x8004, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x8004, 1, ca8); ctrl_out(gspca_dev, 0x40, 1, 0x6028 + flip, 0x0004, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, c50); } if (backlight != sd->vold.backlight) { sd->vold.backlight = backlight; ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight , 0x0024, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight - 10, 0x0025, 0, NULL); } return 0; } static void ov2640_post_unset_alt(struct gspca_dev *gspca_dev) { ctrl_out(gspca_dev, 0x40, 5, 0x0000, 0x0000, 0, NULL); msleep(20); fetch_validx(gspca_dev, tbl_post_unset_alt, ARRAY_SIZE(tbl_post_unset_alt)); }
gpl-2.0
Tof37/Es209ra-3.x
drivers/media/video/gspca/gl860/gl860-ov2640.c
14228
18105
/* Subdriver for the GL860 chip with the OV2640 sensor * Author Olivier LORIN, from Malmostoso's logs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* Sensor : OV2640 */ #include "gl860.h" static u8 dat_init1[] = "\x00\x41\x07\x6a\x06\x61\x0d\x6a" "\x10\x10\xc1\x01"; static u8 c61[] = {0x61}; /* expected */ static u8 c51[] = {0x51}; /* expected */ static u8 c50[] = {0x50}; /* expected */ static u8 c28[] = {0x28}; /* expected */ static u8 ca8[] = {0xa8}; /* expected */ static u8 dat_post[] = "\x00\x41\x07\x6a\x06\xef\x0d\x6a" "\x10\x10\xc1\x01"; static u8 dat_640[] = "\xd0\x01\xd1\x08\xd2\xe0\xd3\x02\xd4\x10\xd5\x81"; static u8 dat_800[] = "\xd0\x01\xd1\x10\xd2\x58\xd3\x02\xd4\x18\xd5\x21"; static u8 dat_1280[] = "\xd0\x01\xd1\x18\xd2\xc0\xd3\x02\xd4\x28\xd5\x01"; static u8 dat_1600[] = "\xd0\x01\xd1\x20\xd2\xb0\xd3\x02\xd4\x30\xd5\x41"; static struct validx tbl_init_at_startup[] = { {0x0000, 0x0000}, {0x0010, 0x0010}, {0x0008, 0x00c0}, {0x0001, 0x00c1}, {0x0001, 0x00c2}, {0x0020, 0x0006}, {0x006a, 0x000d}, {0x0050, 0x0000}, {0x0041, 0x0000}, {0x006a, 0x0007}, {0x0061, 0x0006}, {0x006a, 0x000d}, {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0001, 0x00c1}, {0x0041, 0x00c2}, {0x0004, 0x00d8}, {0x0012, 0x0004}, {0x0000, 0x0058}, {0x0041, 0x0000}, {0x0061, 0x0000}, }; static struct validx tbl_common[] = { {0x6000, 0x00ff}, {0x60ff, 0x002c}, {0x60df, 0x002e}, {0x6001, 0x00ff}, {0x6080, 0x0012}, {0x6000, 0x0000}, 
{0x6000, 0x0045}, {0x6000, 0x0010}, {0x6035, 0x003c}, {0x6000, 0x0011}, {0x6028, 0x0004}, {0x60e5, 0x0013}, {0x6088, 0x0014}, {0x600c, 0x002c}, {0x6078, 0x0033}, {0x60f7, 0x003b}, {0x6000, 0x003e}, {0x6011, 0x0043}, {0x6010, 0x0016}, {0x6082, 0x0039}, {0x6088, 0x0035}, {0x600a, 0x0022}, {0x6040, 0x0037}, {0x6000, 0x0023}, {0x60a0, 0x0034}, {0x601a, 0x0036}, {0x6002, 0x0006}, {0x60c0, 0x0007}, {0x60b7, 0x000d}, {0x6001, 0x000e}, {0x6000, 0x004c}, {0x6081, 0x004a}, {0x6099, 0x0021}, {0x6002, 0x0009}, {0x603e, 0x0024}, {0x6034, 0x0025}, {0x6081, 0x0026}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, {0x6000, 0x005c}, {0x6000, 0x0063}, {0x6000, 0x007c}, {0x6070, 0x0061}, {0x6080, 0x0062}, {0x6080, 0x0020}, {0x6030, 0x0028}, {0x6000, 0x006c}, {0x6000, 0x006e}, {0x6002, 0x0070}, {0x6094, 0x0071}, {0x60c1, 0x0073}, {0x6034, 0x003d}, {0x6057, 0x005a}, {0x60bb, 0x004f}, {0x609c, 0x0050}, {0x6080, 0x006d}, {0x6002, 0x0039}, {0x6033, 0x003a}, {0x60f1, 0x003b}, {0x6031, 0x003c}, {0x6000, 0x00ff}, {0x6014, 0x00e0}, {0x60ff, 0x0076}, {0x60a0, 0x0033}, {0x6020, 0x0042}, {0x6018, 0x0043}, {0x6000, 0x004c}, {0x60d0, 0x0087}, {0x600f, 0x0088}, {0x6003, 0x00d7}, {0x6010, 0x00d9}, {0x6005, 0x00da}, {0x6082, 0x00d3}, {0x60c0, 0x00f9}, {0x6006, 0x0044}, {0x6007, 0x00d1}, {0x6002, 0x00d2}, {0x6000, 0x00d2}, {0x6011, 0x00d8}, {0x6008, 0x00c8}, {0x6080, 0x00c9}, {0x6008, 0x007c}, {0x6020, 0x007d}, {0x6020, 0x007d}, {0x6000, 0x0090}, {0x600e, 0x0091}, {0x601a, 0x0091}, {0x6031, 0x0091}, {0x605a, 0x0091}, {0x6069, 0x0091}, {0x6075, 0x0091}, {0x607e, 0x0091}, {0x6088, 0x0091}, {0x608f, 0x0091}, {0x6096, 0x0091}, {0x60a3, 0x0091}, {0x60af, 0x0091}, {0x60c4, 0x0091}, {0x60d7, 0x0091}, {0x60e8, 0x0091}, {0x6020, 0x0091}, {0x6000, 0x0092}, {0x6006, 0x0093}, {0x60e3, 0x0093}, {0x6005, 0x0093}, {0x6005, 0x0093}, {0x6000, 0x0093}, {0x6004, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0093}, {0x6000, 0x0096}, 
{0x6008, 0x0097}, {0x6019, 0x0097}, {0x6002, 0x0097}, {0x600c, 0x0097}, {0x6024, 0x0097}, {0x6030, 0x0097}, {0x6028, 0x0097}, {0x6026, 0x0097}, {0x6002, 0x0097}, {0x6098, 0x0097}, {0x6080, 0x0097}, {0x6000, 0x0097}, {0x6000, 0x0097}, {0x60ed, 0x00c3}, {0x609a, 0x00c4}, {0x6000, 0x00a4}, {0x6011, 0x00c5}, {0x6051, 0x00c6}, {0x6010, 0x00c7}, {0x6066, 0x00b6}, {0x60a5, 0x00b8}, {0x6064, 0x00b7}, {0x607c, 0x00b9}, {0x60af, 0x00b3}, {0x6097, 0x00b4}, {0x60ff, 0x00b5}, {0x60c5, 0x00b0}, {0x6094, 0x00b1}, {0x600f, 0x00b2}, {0x605c, 0x00c4}, {0x6000, 0x00a8}, {0x60c8, 0x00c0}, {0x6096, 0x00c1}, {0x601d, 0x0086}, {0x6000, 0x0050}, {0x6090, 0x0051}, {0x6018, 0x0052}, {0x6000, 0x0053}, {0x6000, 0x0054}, {0x6088, 0x0055}, {0x6000, 0x0057}, {0x6090, 0x005a}, {0x6018, 0x005b}, {0x6005, 0x005c}, {0x60ed, 0x00c3}, {0x6000, 0x007f}, {0x6005, 0x00da}, {0x601f, 0x00e5}, {0x6067, 0x00e1}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, {0x6000, 0x0005}, {0x6001, 0x00ff}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, }; static struct validx tbl_sensor_settings_common1[] = { {0x0041, 0x0000}, {0x006a, 0x0007}, {0x00ef, 0x0006}, {0x006a, 0x000d}, {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0001, 0x00c1}, {0x0041, 0x00c2}, {0x0004, 0x00d8}, {0x0012, 0x0004}, {0x0000, 0x0058}, {0x0041, 0x0000}, {50, 0xffff}, {0x0061, 0x0000}, {0xffff, 0xffff}, {0x6000, 0x00ff}, {0x6000, 0x007c}, {0x6007, 0x007d}, {30, 0xffff}, {0x0040, 0x0000}, }; static struct validx tbl_sensor_settings_common2[] = { {0x6001, 0x00ff}, {0x6038, 0x000c}, {10, 0xffff}, {0x6000, 0x0011}, }; static struct validx tbl_640[] = { {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6067, 0x00e1}, {0x6004, 0x00da}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6001, 0x00ff}, {0x6000, 0x0012}, {0x6000, 0x0011}, {0x6011, 0x0017}, {0x6075, 0x0018}, {0x6001, 0x0019}, {0x6097, 0x001a}, {0x6036, 0x0032}, {0x60bb, 0x004f}, {0x6057, 0x005a}, {0x609c, 0x0050}, {0x6080, 0x006d}, {0x6092, 0x0026}, {0x60ff, 0x0020}, {0x6000, 0x0027}, 
{0x6000, 0x00ff}, {0x60c8, 0x00c0}, {0x6096, 0x00c1}, {0x6000, 0x008c}, {0x603d, 0x0086}, {0x6089, 0x0050}, {0x6090, 0x0051}, {0x602c, 0x0052}, {0x6000, 0x0053}, {0x6000, 0x0054}, {0x6088, 0x0055}, {0x6000, 0x0057}, {0x60a0, 0x005a}, {0x6078, 0x005b}, {0x6000, 0x005c}, {0x6004, 0x00d3}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, {0x60a1, 0x005a}, }; static struct validx tbl_800[] = { {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6067, 0x00e1}, {0x6004, 0x00da}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6001, 0x00ff}, {0x6040, 0x0012}, {0x6000, 0x0011}, {0x6011, 0x0017}, {0x6043, 0x0018}, {0x6000, 0x0019}, {0x604b, 0x001a}, {0x6009, 0x0032}, {0x60ca, 0x004f}, {0x60a8, 0x0050}, {0x6000, 0x006d}, {0x6038, 0x003d}, {0x60c8, 0x0035}, {0x6000, 0x0022}, {0x6092, 0x0026}, {0x60ff, 0x0020}, {0x6000, 0x0027}, {0x6000, 0x00ff}, {0x6064, 0x00c0}, {0x604b, 0x00c1}, {0x6000, 0x008c}, {0x601d, 0x0086}, {0x6082, 0x00d3}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, {0x6020, 0x008c}, {0x6001, 0x00ff}, {0x6044, 0x0018}, }; static struct validx tbl_big1[] = { {0x0002, 0x00c1}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 0x00e0}, {0x6001, 0x00ff}, {0x6000, 0x0012}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, {0x6000, 0x0011}, {0x6011, 0x0017}, {0x6075, 0x0018}, {0x6001, 0x0019}, {0x6097, 0x001a}, {0x6036, 0x0032}, {0x60bb, 0x004f}, {0x609c, 0x0050}, {0x6057, 0x005a}, {0x6080, 0x006d}, {0x6043, 0x000f}, {0x608f, 0x0003}, {0x6005, 0x007c}, {0x6081, 0x0026}, {0x6000, 0x00ff}, {0x60c8, 0x00c0}, {0x6096, 0x00c1}, {0x6000, 0x008c}, }; static struct validx tbl_big2[] = { {0x603d, 0x0086}, {0x6000, 0x0050}, {0x6090, 0x0051}, {0x602c, 0x0052}, {0x6000, 0x0053}, {0x6000, 0x0054}, {0x6088, 0x0055}, {0x6000, 0x0057}, {0x6040, 0x005a}, {0x60f0, 0x005b}, {0x6001, 0x005c}, {0x6082, 0x00d3}, {0x6000, 0x008e}, }; static struct validx tbl_big3[] = { {0x6004, 0x00da}, {0x6000, 0x00e0}, {0x6067, 0x00e1}, {0x60ff, 0x00dd}, {0x6001, 0x00ff}, {0x6000, 0x00ff}, {0x60f1, 0x00dd}, {0x6004, 
0x00e0}, {0x6001, 0x00ff}, {0x6000, 0x0011}, {0x6000, 0x00ff}, {0x6010, 0x00c7}, {0x6000, 0x0092}, {0x6006, 0x0093}, {0x60e3, 0x0093}, {0x6005, 0x0093}, {0x6005, 0x0093}, {0x60ed, 0x00c3}, {0x6000, 0x00a4}, {0x60d0, 0x0087}, {0x6003, 0x0096}, {0x600c, 0x0097}, {0x6024, 0x0097}, {0x6030, 0x0097}, {0x6028, 0x0097}, {0x6026, 0x0097}, {0x6002, 0x0097}, {0x6001, 0x00ff}, {0x6043, 0x000f}, {0x608f, 0x0003}, {0x6000, 0x002d}, {0x6000, 0x002e}, {0x600a, 0x0022}, {0x6002, 0x0070}, {0x6008, 0x0014}, {0x6048, 0x0014}, {0x6000, 0x00ff}, {0x6000, 0x00e0}, {0x60ff, 0x00dd}, }; static struct validx tbl_post_unset_alt[] = { {0x006a, 0x000d}, {0x6001, 0x00ff}, {0x6081, 0x0026}, {0x6000, 0x0000}, {0x6000, 0x0045}, {0x6000, 0x0010}, {0x6068, 0x000d}, {50, 0xffff}, {0x0021, 0x0000}, }; static int ov2640_init_at_startup(struct gspca_dev *gspca_dev); static int ov2640_configure_alt(struct gspca_dev *gspca_dev); static int ov2640_init_pre_alt(struct gspca_dev *gspca_dev); static int ov2640_init_post_alt(struct gspca_dev *gspca_dev); static void ov2640_post_unset_alt(struct gspca_dev *gspca_dev); static int ov2640_camera_settings(struct gspca_dev *gspca_dev); /*==========================================================================*/ void ov2640_init_settings(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->vcur.backlight = 32; sd->vcur.brightness = 0; sd->vcur.sharpness = 6; sd->vcur.contrast = 0; sd->vcur.gamma = 32; sd->vcur.hue = 0; sd->vcur.saturation = 128; sd->vcur.whitebal = 64; sd->vcur.mirror = 0; sd->vcur.flip = 0; sd->vmax.backlight = 64; sd->vmax.brightness = 255; sd->vmax.sharpness = 31; sd->vmax.contrast = 255; sd->vmax.gamma = 64; sd->vmax.hue = 254 + 2; sd->vmax.saturation = 255; sd->vmax.whitebal = 128; sd->vmax.mirror = 1; sd->vmax.flip = 1; sd->vmax.AC50Hz = 0; sd->dev_camera_settings = ov2640_camera_settings; sd->dev_init_at_startup = ov2640_init_at_startup; sd->dev_configure_alt = ov2640_configure_alt; sd->dev_init_pre_alt = 
ov2640_init_pre_alt; sd->dev_post_unset_alt = ov2640_post_unset_alt; } /*==========================================================================*/ static void common(struct gspca_dev *gspca_dev) { fetch_validx(gspca_dev, tbl_common, ARRAY_SIZE(tbl_common)); } static int ov2640_init_at_startup(struct gspca_dev *gspca_dev) { fetch_validx(gspca_dev, tbl_init_at_startup, ARRAY_SIZE(tbl_init_at_startup)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_init1); common(gspca_dev); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0006, 1, c61); ctrl_out(gspca_dev, 0x40, 1, 0x00ef, 0x0006, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, c51); ctrl_out(gspca_dev, 0x40, 1, 0x0051, 0x0000, 0, NULL); /* ctrl_out(gspca_dev, 0x40, 11, 0x0000, 0x0000, 0, NULL); */ return 0; } static int ov2640_init_pre_alt(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->mirrorMask = 0; sd->vold.backlight = -1; sd->vold.brightness = -1; sd->vold.sharpness = -1; sd->vold.contrast = -1; sd->vold.saturation = -1; sd->vold.gamma = -1; sd->vold.hue = -1; sd->vold.whitebal = -1; sd->vold.mirror = -1; sd->vold.flip = -1; ov2640_init_post_alt(gspca_dev); return 0; } static int ov2640_init_post_alt(struct gspca_dev *gspca_dev) { s32 reso = gspca_dev->cam.cam_mode[(s32) gspca_dev->curr_mode].priv; s32 n; /* reserved for FETCH functions */ ctrl_out(gspca_dev, 0x40, 5, 0x0001, 0x0000, 0, NULL); n = fetch_validx(gspca_dev, tbl_sensor_settings_common1, ARRAY_SIZE(tbl_sensor_settings_common1)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_post); common(gspca_dev); keep_on_fetching_validx(gspca_dev, tbl_sensor_settings_common1, ARRAY_SIZE(tbl_sensor_settings_common1), n); switch (reso) { case IMAGE_640: n = fetch_validx(gspca_dev, tbl_640, ARRAY_SIZE(tbl_640)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_640); break; case IMAGE_800: n = fetch_validx(gspca_dev, tbl_800, ARRAY_SIZE(tbl_800)); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_800); break; case 
IMAGE_1600: case IMAGE_1280: n = fetch_validx(gspca_dev, tbl_big1, ARRAY_SIZE(tbl_big1)); if (reso == IMAGE_1280) { n = fetch_validx(gspca_dev, tbl_big2, ARRAY_SIZE(tbl_big2)); } else { ctrl_out(gspca_dev, 0x40, 1, 0x601d, 0x0086, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00d7, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6082, 0x00d3, 0, NULL); } n = fetch_validx(gspca_dev, tbl_big3, ARRAY_SIZE(tbl_big3)); if (reso == IMAGE_1280) { ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_1280); } else { ctrl_out(gspca_dev, 0x40, 1, 0x6020, 0x008c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6076, 0x0018, 0, NULL); ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, dat_1600); } break; } n = fetch_validx(gspca_dev, tbl_sensor_settings_common2, ARRAY_SIZE(tbl_sensor_settings_common2)); ov2640_camera_settings(gspca_dev); return 0; } static int ov2640_configure_alt(struct gspca_dev *gspca_dev) { s32 reso = gspca_dev->cam.cam_mode[(s32) gspca_dev->curr_mode].priv; switch (reso) { case IMAGE_640: gspca_dev->alt = 3 + 1; break; case IMAGE_800: case IMAGE_1280: case IMAGE_1600: gspca_dev->alt = 1 + 1; break; } return 0; } static int ov2640_camera_settings(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; s32 backlight = sd->vcur.backlight; s32 bright = sd->vcur.brightness; s32 sharp = sd->vcur.sharpness; s32 gam = sd->vcur.gamma; s32 cntr = sd->vcur.contrast; s32 sat = sd->vcur.saturation; s32 hue = sd->vcur.hue; s32 wbal = sd->vcur.whitebal; s32 mirror = (((sd->vcur.mirror > 0) ^ sd->mirrorMask) == 0); s32 flip = (((sd->vcur.flip > 0) ^ sd->mirrorMask) == 0); if (backlight != sd->vold.backlight) { /* No sd->vold.backlight=backlight; (to be done again later) */ if (backlight < 0 || backlight > sd->vmax.backlight) backlight = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight , 0x0024, 0, NULL); 
ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight - 10, 0x0025, 0, NULL); } if (bright != sd->vold.brightness) { sd->vold.brightness = bright; if (bright < 0 || bright > sd->vmax.brightness) bright = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6009 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + bright, 0x007d, 0, NULL); } if (wbal != sd->vold.whitebal) { sd->vold.whitebal = wbal; if (wbal < 0 || wbal > sd->vmax.whitebal) wbal = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6003 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + wbal, 0x007d, 0, NULL); } if (cntr != sd->vold.contrast) { sd->vold.contrast = cntr; if (cntr < 0 || cntr > sd->vmax.contrast) cntr = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6007 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + cntr, 0x007d, 0, NULL); } if (sat != sd->vold.saturation) { sd->vold.saturation = sat; if (sat < 0 || sat > sd->vmax.saturation) sat = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + sat, 0x007d, 0, NULL); } if (sharp != sd->vold.sharpness) { sd->vold.sharpness = sharp; if (sharp < 0 || sharp > sd->vmax.sharpness) sharp = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x0092, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x60c0 + sharp, 0x0093, 0, NULL); } if (hue != sd->vold.hue) { sd->vold.hue = hue; if (hue < 0 || hue > sd->vmax.hue) hue = 0; ctrl_out(gspca_dev, 0x40, 1, 0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6002 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + hue * (hue < 255), 0x007d, 0, NULL); if (hue >= 255) sd->swapRB = 1; else sd->swapRB = 0; } if (gam != sd->vold.gamma) { sd->vold.gamma = gam; if (gam < 0 || gam > sd->vmax.gamma) gam = 0; ctrl_out(gspca_dev, 0x40, 1, 
0x6000 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6008 , 0x007c, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000 + gam, 0x007d, 0, NULL); } if (mirror != sd->vold.mirror || flip != sd->vold.flip) { sd->vold.mirror = mirror; sd->vold.flip = flip; mirror = 0x80 * mirror; ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x8004, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x8004, 1, c28); ctrl_out(gspca_dev, 0x40, 1, 0x6028 + mirror, 0x0004, 0, NULL); flip = 0x50 * flip + mirror; ctrl_out(gspca_dev, 0x40, 1, 0x6001, 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x8004, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x8004, 1, ca8); ctrl_out(gspca_dev, 0x40, 1, 0x6028 + flip, 0x0004, 0, NULL); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, c50); } if (backlight != sd->vold.backlight) { sd->vold.backlight = backlight; ctrl_out(gspca_dev, 0x40, 1, 0x6001 , 0x00ff, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight , 0x0024, 0, NULL); ctrl_out(gspca_dev, 0x40, 1, 0x601e + backlight - 10, 0x0025, 0, NULL); } return 0; } static void ov2640_post_unset_alt(struct gspca_dev *gspca_dev) { ctrl_out(gspca_dev, 0x40, 5, 0x0000, 0x0000, 0, NULL); msleep(20); fetch_validx(gspca_dev, tbl_post_unset_alt, ARRAY_SIZE(tbl_post_unset_alt)); }
gpl-2.0
xplodwild/android_kernel_asus_tf300t
drivers/gpu/drm/nouveau/nouveau_mem.c
149
23243
/* * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. * Copyright 2005 Stephane Marchesin * * The Weather Channel (TM) funded Tungsten Graphics to develop the * initial release of the Radeon 8500 driver under the XFree86 license. * This notice must be preserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Keith Whitwell <keith@tungstengraphics.com> */ #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "nouveau_drv.h" #include "nouveau_pm.h" #include "nouveau_mm.h" #include "nouveau_vm.h" /* * NV10-NV40 tiling helpers */ static void nv10_mem_update_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, uint32_t addr, uint32_t size, uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; int i = tile - dev_priv->tile.reg; unsigned long save; nouveau_fence_unref(&tile->fence); if (tile->pitch) pfb->free_tile_region(dev, i); if (pitch) pfb->init_tile_region(dev, i, addr, size, pitch, flags); spin_lock_irqsave(&dev_priv->context_switch_lock, save); pfifo->reassign(dev, false); pfifo->cache_pull(dev, false); nouveau_wait_for_idle(dev); pfb->set_tile_region(dev, i); pgraph->set_tile_region(dev, i); pfifo->cache_pull(dev, true); pfifo->reassign(dev, true); spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); } static struct nouveau_tile_reg * nv10_mem_get_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; spin_lock(&dev_priv->tile.lock); if (!tile->used && (!tile->fence || nouveau_fence_signalled(tile->fence))) tile->used = true; else tile = NULL; spin_unlock(&dev_priv->tile.lock); return tile; } void nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, struct nouveau_fence *fence) { struct drm_nouveau_private *dev_priv = dev->dev_private; if (tile) { spin_lock(&dev_priv->tile.lock); if (fence) { /* Mark it as pending. 
*/ tile->fence = fence; nouveau_fence_ref(fence); } tile->used = false; spin_unlock(&dev_priv->tile.lock); } } struct nouveau_tile_reg * nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_tile_reg *tile, *found = NULL; int i; for (i = 0; i < pfb->num_tiles; i++) { tile = nv10_mem_get_tile_region(dev, i); if (pitch && !found) { found = tile; continue; } else if (tile && tile->pitch) { /* Kill an unused tile region. */ nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0); } nv10_mem_put_tile_region(dev, tile, NULL); } if (found) nv10_mem_update_tile_region(dev, found, addr, size, pitch, flags); return found; } /* * Cleanup everything */ void nouveau_mem_vram_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; ttm_bo_device_release(&dev_priv->ttm.bdev); nouveau_ttm_global_release(dev_priv); if (dev_priv->fb_mtrr >= 0) { drm_mtrr_del(dev_priv->fb_mtrr, pci_resource_start(dev->pdev, 1), pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); dev_priv->fb_mtrr = -1; } } void nouveau_mem_gart_fini(struct drm_device *dev) { nouveau_sgdma_takedown(dev); if (drm_core_has_AGP(dev) && dev->agp) { struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. 
*/ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { if (entry->bound) drm_unbind_agp(entry->memory); drm_free_agp(entry->memory, entry->pages); kfree(entry); } INIT_LIST_HEAD(&dev->agp->memory); if (dev->agp->acquired) drm_agp_release(dev); dev->agp->acquired = 0; dev->agp->enabled = 0; } } static uint32_t nouveau_mem_detect_nv04(struct drm_device *dev) { uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0); if (boot0 & 0x00000100) return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: return 32 * 1024 * 1024; case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: return 16 * 1024 * 1024; case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: return 8 * 1024 * 1024; case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: return 4 * 1024 * 1024; } return 0; } static uint32_t nouveau_mem_detect_nforce(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pci_dev *bridge; uint32_t mem; bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); if (!bridge) { NV_ERROR(dev, "no bridge device\n"); return 0; } if (dev_priv->flags & NV_NFORCE) { pci_read_config_dword(bridge, 0x7C, &mem); return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; } else if (dev_priv->flags & NV_NFORCE2) { pci_read_config_dword(bridge, 0x84, &mem); return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; } NV_ERROR(dev, "impossible!\n"); return 0; } int nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; if (dev_priv->card_type == NV_04) { dev_priv->vram_size = nouveau_mem_detect_nv04(dev); } else if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { dev_priv->vram_size = nouveau_mem_detect_nforce(dev); } else if (dev_priv->card_type < NV_50) { dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; } if (dev_priv->vram_size) return 0; return -ENOMEM; } bool nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) { if 
(!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) return true; return false; } #if __OS_HAS_AGP static unsigned long get_agp_mode(struct drm_device *dev, unsigned long mode) { struct drm_nouveau_private *dev_priv = dev->dev_private; /* * FW seems to be broken on nv18, it makes the card lock up * randomly. */ if (dev_priv->chipset == 0x18) mode &= ~PCI_AGP_COMMAND_FW; /* * AGP mode set in the command line. */ if (nouveau_agpmode > 0) { bool agpv3 = mode & 0x8; int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode; mode = (mode & ~0x7) | (rate & 0x7); } return mode; } #endif int nouveau_mem_reset_agp(struct drm_device *dev) { #if __OS_HAS_AGP uint32_t saved_pci_nv_1, pmc_enable; int ret; /* First of all, disable fast writes, otherwise if it's * already enabled in the AGP bridge and we disable the card's * AGP controller we might be locking ourselves out of it. */ if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) | dev->agp->mode) & PCI_AGP_COMMAND_FW) { struct drm_agp_info info; struct drm_agp_mode mode; ret = drm_agp_info(dev, &info); if (ret) return ret; mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW; ret = drm_agp_enable(dev, mode); if (ret) return ret; } saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1); /* clear busmaster bit */ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); /* disable AGP */ nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0); /* power cycle pgraph, if enabled */ pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE); if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); } /* and restore (gives effect of resetting AGP) */ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1); #endif return 0; } int nouveau_mem_init_agp(struct drm_device *dev) { #if __OS_HAS_AGP struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_agp_info info; struct drm_agp_mode mode; int ret; if (!dev->agp->acquired) { ret = 
drm_agp_acquire(dev); if (ret) { NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret); return ret; } } nouveau_mem_reset_agp(dev); ret = drm_agp_info(dev, &info); if (ret) { NV_ERROR(dev, "Unable to get AGP info: %d\n", ret); return ret; } /* see agp.h for the AGPSTAT_* modes available */ mode.mode = get_agp_mode(dev, info.mode); ret = drm_agp_enable(dev, mode); if (ret) { NV_ERROR(dev, "Unable to enable AGP: %d\n", ret); return ret; } dev_priv->gart_info.type = NOUVEAU_GART_AGP; dev_priv->gart_info.aper_base = info.aperture_base; dev_priv->gart_info.aper_size = info.aperture_size; #endif return 0; } int nouveau_mem_vram_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; int ret, dma_bits; dma_bits = 32; if (dev_priv->card_type >= NV_50) { if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) dma_bits = 40; } else if (drm_pci_device_is_pcie(dev) && dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) dma_bits = 39; } ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); if (ret) return ret; dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); ret = nouveau_ttm_global_init(dev_priv); if (ret) return ret; ret = ttm_bo_device_init(&dev_priv->ttm.bdev, dev_priv->ttm.bo_global_ref.ref.object, &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET, dma_bits <= 32 ? 
true : false); if (ret) { NV_ERROR(dev, "Error initialising bo driver: %d\n", ret); return ret; } /* reserve space at end of VRAM for PRAMIN */ if (dev_priv->card_type >= NV_50) { dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024; } else if (dev_priv->card_type >= NV_40) { u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8); u32 rsvd; /* estimate grctx size, the magics come from nv40_grctx.c */ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs; else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs; else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; else rsvd = 0x4a40 * vs; rsvd += 16 * 1024; rsvd *= dev_priv->engine.fifo.channels; /* pciegart table */ if (drm_pci_device_is_pcie(dev)) rsvd += 512 * 1024; /* object storage */ rsvd += 512 * 1024; dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); } else { dev_priv->ramin_rsvd_vram = 512 * 1024; } ret = dev_priv->engine.vram.init(dev); if (ret) return ret; NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); if (dev_priv->vram_sys_base) { NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", dev_priv->vram_sys_base); } dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1); dev_priv->fb_mappable_pages >>= PAGE_SHIFT; dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; /* mappable vram */ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, dev_priv->fb_available_size >> PAGE_SHIFT); if (ret) { NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret); return ret; } if (dev_priv->card_type < NV_50) { ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, 0, 0, &dev_priv->vga_ram); if (ret == 0) ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM); if (ret) { NV_WARN(dev, "failed to reserve VGA memory\n"); nouveau_bo_ref(NULL, &dev_priv->vga_ram); } } dev_priv->fb_mtrr = 
drm_mtrr_add(pci_resource_start(dev->pdev, 1), pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); return 0; } int nouveau_mem_gart_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; int ret; dev_priv->gart_info.type = NOUVEAU_GART_NONE; #if !defined(__powerpc__) && !defined(__ia64__) if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) { ret = nouveau_mem_init_agp(dev); if (ret) NV_ERROR(dev, "Error initialising AGP: %d\n", ret); } #endif if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { ret = nouveau_sgdma_init(dev); if (ret) { NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret); return ret; } } NV_INFO(dev, "%d MiB GART (aperture)\n", (int)(dev_priv->gart_info.aper_size >> 20)); dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size; ret = ttm_bo_init_mm(bdev, TTM_PL_TT, dev_priv->gart_info.aper_size >> PAGE_SHIFT); if (ret) { NV_ERROR(dev, "Failed TT mm init: %d\n", ret); return ret; } return 0; } void nouveau_mem_timing_init(struct drm_device *dev) { /* cards < NVC0 only */ struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_memtimings *memtimings = &pm->memtimings; struct nvbios *bios = &dev_priv->vbios; struct bit_entry P; u8 tUNK_0, tUNK_1, tUNK_2; u8 tRP; /* Byte 3 */ u8 tRAS; /* Byte 5 */ u8 tRFC; /* Byte 7 */ u8 tRC; /* Byte 9 */ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; u8 magic_number = 0; /* Yeah... 
sorry*/ u8 *mem = NULL, *entry; int i, recordlen, entries; if (bios->type == NVBIOS_BIT) { if (bit_table(dev, 'P', &P)) return; if (P.version == 1) mem = ROMPTR(bios, P.data[4]); else if (P.version == 2) mem = ROMPTR(bios, P.data[8]); else { NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); } } else { NV_DEBUG(dev, "BMP version too old for memory\n"); return; } if (!mem) { NV_DEBUG(dev, "memory timing table pointer invalid\n"); return; } if (mem[0] != 0x10) { NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]); return; } /* validate record length */ entries = mem[2]; recordlen = mem[3]; if (recordlen < 15) { NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]); return; } /* parse vbios entries into common format */ memtimings->timing = kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); if (!memtimings->timing) return; /* Get "some number" from the timing reg for NV_40 * Used in calculations later */ if(dev_priv->card_type == NV_40) { magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; } entry = mem + mem[1]; for (i = 0; i < entries; i++, entry += recordlen) { struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; if (entry[0] == 0) continue; tUNK_18 = 1; tUNK_19 = 1; tUNK_20 = 0; tUNK_21 = 0; switch (min(recordlen, 22)) { case 22: tUNK_21 = entry[21]; case 21: tUNK_20 = entry[20]; case 20: tUNK_19 = entry[19]; case 19: tUNK_18 = entry[18]; default: tUNK_0 = entry[0]; tUNK_1 = entry[1]; tUNK_2 = entry[2]; tRP = entry[3]; tRAS = entry[5]; tRFC = entry[7]; tRC = entry[9]; tUNK_10 = entry[10]; tUNK_11 = entry[11]; tUNK_12 = entry[12]; tUNK_13 = entry[13]; tUNK_14 = entry[14]; break; } timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP); /* XXX: I don't trust the -1's and +1's... they must come * from somewhere! 
*/ timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | tUNK_18 << 16 | (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; if(dev_priv->chipset == 0xa8) { timing->reg_100224 |= (tUNK_2 - 1); } else { timing->reg_100224 |= (tUNK_2 + 2 - magic_number); } timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { timing->reg_100228 |= (tUNK_19 - 1) << 24; } if(dev_priv->card_type == NV_40) { /* NV40: don't know what the rest of the regs are.. * And don't need to know either */ timing->reg_100228 |= 0x20200000 | magic_number << 24; } else if(dev_priv->card_type >= NV_50) { /* XXX: reg_10022c */ timing->reg_10022c = tUNK_2 - 1; timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | tUNK_13 << 8 | tUNK_13); timing->reg_100234 = (tRAS << 24 | tRC); timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; if(dev_priv->chipset < 0xa3) { timing->reg_100234 |= (tUNK_2 + 2) << 8; } else { /* XXX: +6? */ timing->reg_100234 |= (tUNK_19 + 6) << 8; } /* XXX; reg_100238, reg_10023c * reg_100238: 0x00?????? 
* reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */ timing->reg_10023c = 0x202; if(dev_priv->chipset < 0xa3) { timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; } else { /* currently unknown * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ } } NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, timing->reg_100220, timing->reg_100224, timing->reg_100228, timing->reg_10022c); NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", timing->reg_100230, timing->reg_100234, timing->reg_100238, timing->reg_10023c); } memtimings->nr_timing = entries; memtimings->supported = true; } void nouveau_mem_timing_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; kfree(mem->timing); } static int nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) { struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); struct nouveau_mm *mm; u64 size, block, rsvd; int ret; rsvd = (256 * 1024); /* vga memory */ size = (p_size << PAGE_SHIFT) - rsvd; block = dev_priv->vram_rblock_size; ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12); if (ret) return ret; man->priv = mm; return 0; } static int nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) { struct nouveau_mm *mm = man->priv; int ret; ret = nouveau_mm_fini(&mm); if (ret) return ret; man->priv = NULL; return 0; } static void nouveau_vram_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nouveau_mem *node = mem->mm_node; struct drm_device *dev = dev_priv->dev; if (node->tmp_vma.node) { nouveau_vm_unmap(&node->tmp_vma); nouveau_vm_put(&node->tmp_vma); } vram->put(dev, (struct nouveau_mem **)&mem->mm_node); } static int nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct 
ttm_placement *placement, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_mem *node; u32 size_nc = 0; int ret; if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) size_nc = 1 << nvbo->vma.node->type; ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, mem->page_alignment << PAGE_SHIFT, size_nc, (nvbo->tile_flags >> 8) & 0x3ff, &node); if (ret) { mem->mm_node = NULL; return (ret == -ENOSPC) ? 0 : ret; } node->page_shift = 12; if (nvbo->vma.node) node->page_shift = nvbo->vma.node->type; mem->mm_node = node; mem->start = node->offset >> PAGE_SHIFT; return 0; } void nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) { struct nouveau_mm *mm = man->priv; struct nouveau_mm_node *r; u32 total = 0, free = 0; mutex_lock(&mm->mutex); list_for_each_entry(r, &mm->nodes, nl_entry) { printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n", prefix, r->type, ((u64)r->offset << 12), (((u64)r->offset + r->length) << 12)); total += r->length; if (!r->type) free += r->length; } mutex_unlock(&mm->mutex); printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n", prefix, (u64)total << 12, (u64)free << 12); printk(KERN_DEBUG "%s block: 0x%08x\n", prefix, mm->block_size << 12); } const struct ttm_mem_type_manager_func nouveau_vram_manager = { nouveau_vram_manager_init, nouveau_vram_manager_fini, nouveau_vram_manager_new, nouveau_vram_manager_del, nouveau_vram_manager_debug }; static int nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) { return 0; } static int nouveau_gart_manager_fini(struct ttm_mem_type_manager *man) { return 0; } static void nouveau_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct nouveau_mem *node = mem->mm_node; if (node->tmp_vma.node) { nouveau_vm_unmap(&node->tmp_vma); 
nouveau_vm_put(&node->tmp_vma); } mem->mm_node = NULL; } static int nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_vma *vma = &nvbo->vma; struct nouveau_vm *vm = vma->vm; struct nouveau_mem *node; int ret; if (unlikely((mem->num_pages << PAGE_SHIFT) >= dev_priv->gart_info.aper_size)) return -ENOMEM; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; /* This node must be for evicting large-paged VRAM * to system memory. Due to a nv50 limitation of * not being able to mix large/small pages within * the same PDE, we need to create a temporary * small-paged VMA for the eviction. */ if (vma->node->type != vm->spg_shift) { ret = nouveau_vm_get(vm, (u64)vma->node->length << 12, vm->spg_shift, NV_MEM_ACCESS_RW, &node->tmp_vma); if (ret) { kfree(node); return ret; } } node->page_shift = nvbo->vma.node->type; mem->mm_node = node; mem->start = 0; return 0; } void nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) { } const struct ttm_mem_type_manager_func nouveau_gart_manager = { nouveau_gart_manager_init, nouveau_gart_manager_fini, nouveau_gart_manager_new, nouveau_gart_manager_del, nouveau_gart_manager_debug };
gpl-2.0
Brainiarc7/linux-3.18-parrot
drivers/regulator/st-pwm.c
149
4910
/* * Regulator driver for ST's PWM Regulators * * Copyright (C) 2014 - STMicroelectronics Inc. * * Author: Lee Jones <lee.jones@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pwm.h> #define ST_PWM_REG_PERIOD 8448 struct st_pwm_regulator_pdata { const struct regulator_desc *desc; struct st_pwm_voltages *duty_cycle_table; }; struct st_pwm_regulator_data { const struct st_pwm_regulator_pdata *pdata; struct pwm_device *pwm; bool enabled; int state; }; struct st_pwm_voltages { unsigned int uV; unsigned int dutycycle; }; static int st_pwm_regulator_get_voltage_sel(struct regulator_dev *dev) { struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev); return drvdata->state; } static int st_pwm_regulator_set_voltage_sel(struct regulator_dev *dev, unsigned selector) { struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev); int dutycycle; int ret; dutycycle = (ST_PWM_REG_PERIOD / 100) * drvdata->pdata->duty_cycle_table[selector].dutycycle; ret = pwm_config(drvdata->pwm, dutycycle, ST_PWM_REG_PERIOD); if (ret) { dev_err(&dev->dev, "Failed to configure PWM\n"); return ret; } drvdata->state = selector; if (!drvdata->enabled) { ret = pwm_enable(drvdata->pwm); if (ret) { dev_err(&dev->dev, "Failed to enable PWM\n"); return ret; } drvdata->enabled = true; } return 0; } static int st_pwm_regulator_list_voltage(struct regulator_dev *dev, unsigned selector) { struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev); if (selector >= dev->desc->n_voltages) return -EINVAL; return drvdata->pdata->duty_cycle_table[selector].uV; } static struct regulator_ops 
st_pwm_regulator_voltage_ops = { .set_voltage_sel = st_pwm_regulator_set_voltage_sel, .get_voltage_sel = st_pwm_regulator_get_voltage_sel, .list_voltage = st_pwm_regulator_list_voltage, .map_voltage = regulator_map_voltage_iterate, }; static struct st_pwm_voltages b2105_duty_cycle_table[] = { { .uV = 1114000, .dutycycle = 0, }, { .uV = 1095000, .dutycycle = 10, }, { .uV = 1076000, .dutycycle = 20, }, { .uV = 1056000, .dutycycle = 30, }, { .uV = 1036000, .dutycycle = 40, }, { .uV = 1016000, .dutycycle = 50, }, /* WARNING: Values above 50% duty-cycle cause boot failures. */ }; static const struct regulator_desc b2105_desc = { .name = "b2105-pwm-regulator", .ops = &st_pwm_regulator_voltage_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .n_voltages = ARRAY_SIZE(b2105_duty_cycle_table), .supply_name = "pwm", }; static const struct st_pwm_regulator_pdata b2105_info = { .desc = &b2105_desc, .duty_cycle_table = b2105_duty_cycle_table, }; static const struct of_device_id st_pwm_of_match[] = { { .compatible = "st,b2105-pwm-regulator", .data = &b2105_info, }, { }, }; MODULE_DEVICE_TABLE(of, st_pwm_of_match); static int st_pwm_regulator_probe(struct platform_device *pdev) { struct st_pwm_regulator_data *drvdata; struct regulator_dev *regulator; struct regulator_config config = { }; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_match; if (!np) { dev_err(&pdev->dev, "Device Tree node missing\n"); return -EINVAL; } drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; of_match = of_match_device(st_pwm_of_match, &pdev->dev); if (!of_match) { dev_err(&pdev->dev, "failed to match of device\n"); return -ENODEV; } drvdata->pdata = of_match->data; config.init_data = of_get_regulator_init_data(&pdev->dev, np); if (!config.init_data) return -ENOMEM; config.of_node = np; config.dev = &pdev->dev; config.driver_data = drvdata; drvdata->pwm = devm_pwm_get(&pdev->dev, NULL); if (IS_ERR(drvdata->pwm)) { 
dev_err(&pdev->dev, "Failed to get PWM\n"); return PTR_ERR(drvdata->pwm); } regulator = devm_regulator_register(&pdev->dev, drvdata->pdata->desc, &config); if (IS_ERR(regulator)) { dev_err(&pdev->dev, "Failed to register regulator %s\n", drvdata->pdata->desc->name); return PTR_ERR(regulator); } return 0; } static struct platform_driver st_pwm_regulator_driver = { .driver = { .name = "st-pwm-regulator", .owner = THIS_MODULE, .of_match_table = of_match_ptr(st_pwm_of_match), }, .probe = st_pwm_regulator_probe, }; module_platform_driver(st_pwm_regulator_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); MODULE_DESCRIPTION("ST PWM Regulator Driver"); MODULE_ALIAS("platform:st_pwm-regulator");
gpl-2.0
XCage15/linux-1
drivers/gpu/drm/radeon/radeon_kms.c
149
28385
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include <drm/drmP.h> #include "radeon.h" #include <drm/radeon_drm.h> #include "radeon_asic.h" #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include "radeon_kfd.h" #if defined(CONFIG_VGA_SWITCHEROO) bool radeon_has_atpx(void); #else static inline bool radeon_has_atpx(void) { return false; } #endif /** * radeon_driver_unload_kms - Main unload function for KMS. * * @dev: drm dev pointer * * This is the main unload function for KMS (all asics). * It calls radeon_modeset_fini() to tear down the * displays, and radeon_device_fini() to tear down * the rest of the device (CP, writeback, etc.). * Returns 0 on success. 
 */
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Nothing was ever attached to this drm_device. */
	if (rdev == NULL)
		return 0;

	/* MMIO never mapped: device init failed very early, so only the
	 * radeon_device allocation itself needs to be undone. */
	if (rdev->rmmio == NULL)
		goto done_free;

	/* Make sure the GPU is powered while we tear it down; the device
	 * is going away, so this reference is intentionally not dropped. */
	pm_runtime_get_sync(dev->dev);

	radeon_kfd_device_fini(rdev);

	radeon_acpi_fini(rdev);
	
	/* Displays first, then CP/writeback/etc. */
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* PX (PowerXpress) runtime power management only applies to dGPUs
	 * with ATPX support; IGPs are excluded. */
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0))
		flags |= RADEON_IS_PX;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	radeon_kfd_device_probe(rdev);
	radeon_kfd_device_init(rdev);

	/* On PX systems, hand the device over to runtime PM with a 5s
	 * autosuspend delay and drop the initial reference. */
	if (radeon_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	/* Any fatal error above: unwind everything via the unload path. */
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}

/**
 * radeon_set_filp_rights - Set filp right.
 *
 * @dev: drm dev pointer
 * @owner: drm file
 * @applier: drm file
 * @value: value
 *
 * Sets the filp rights for the device (all asics).
 * On input *value==1 requests ownership, *value==0 revokes it; on
 * return *value reports whether @applier now owns the right.
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	/* struct_mutex serializes ownership changes between open files. */
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @rdev: radeon device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
*/ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_info *info = data; struct radeon_mode_info *minfo = &rdev->mode_info; uint32_t *value, value_tmp, *value_ptr, value_size; uint64_t value64; struct drm_crtc *crtc; int i, found; value_ptr = (uint32_t *)((unsigned long)info->value); value = &value_tmp; value_size = sizeof(uint32_t); switch (info->request) { case RADEON_INFO_DEVICE_ID: *value = dev->pdev->device; break; case RADEON_INFO_NUM_GB_PIPES: *value = rdev->num_gb_pipes; break; case RADEON_INFO_NUM_Z_PIPES: *value = rdev->num_z_pipes; break; case RADEON_INFO_ACCEL_WORKING: /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) *value = false; else *value = rdev->accel_working; break; case RADEON_INFO_CRTC_FROM_ID: if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; } for (i = 0, found = 0; i < rdev->num_crtc; i++) { crtc = (struct drm_crtc *)minfo->crtcs[i]; if (crtc && crtc->base.id == *value) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); *value = radeon_crtc->crtc_id; found = 1; break; } } if (!found) { DRM_DEBUG_KMS("unknown crtc id %d\n", *value); return -EINVAL; } break; case RADEON_INFO_ACCEL_WORKING2: if (rdev->family == CHIP_HAWAII) { if (rdev->accel_working) { if (rdev->new_fw) *value = 3; else *value = 2; } else { *value = 0; } } else { *value = rdev->accel_working; } break; case RADEON_INFO_TILING_CONFIG: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.tile_config; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.tile_config; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.tile_config; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.tile_config; else if (rdev->family >= CHIP_RV770) *value = 
rdev->config.rv770.tile_config; else if (rdev->family >= CHIP_R600) *value = rdev->config.r600.tile_config; else { DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); return -EINVAL; } break; case RADEON_INFO_WANT_HYPERZ: /* The "value" here is both an input and output parameter. * If the input value is 1, filp requests hyper-z access. * If the input value is 0, filp revokes its hyper-z access. * * When returning, the value is 1 if filp owns hyper-z access, * 0 otherwise. */ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; } if (*value >= 2) { DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value); return -EINVAL; } radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value); break; case RADEON_INFO_WANT_CMASK: /* The same logic as Hyper-Z. */ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; } if (*value >= 2) { DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value); return -EINVAL; } radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value); break; case RADEON_INFO_CLOCK_CRYSTAL_FREQ: /* return clock value in KHz */ if (rdev->asic->get_xclk) *value = radeon_get_xclk(rdev) * 10; else *value = rdev->clock.spll.reference_freq * 10; break; case RADEON_INFO_NUM_BACKENDS: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.max_backends_per_se * rdev->config.cik.max_shader_engines; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.max_backends_per_se * rdev->config.si.max_shader_engines; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.max_backends; else if (rdev->family >= CHIP_RV770) *value = rdev->config.rv770.max_backends; else if (rdev->family >= CHIP_R600) *value = rdev->config.r600.max_backends; else { return -EINVAL; } break; case 
RADEON_INFO_NUM_TILE_PIPES: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.max_tile_pipes; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.max_tile_pipes; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.max_tile_pipes; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.max_tile_pipes; else if (rdev->family >= CHIP_RV770) *value = rdev->config.rv770.max_tile_pipes; else if (rdev->family >= CHIP_R600) *value = rdev->config.r600.max_tile_pipes; else { return -EINVAL; } break; case RADEON_INFO_FUSION_GART_WORKING: *value = 1; break; case RADEON_INFO_BACKEND_MAP: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.backend_map; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.backend_map; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.backend_map; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.backend_map; else if (rdev->family >= CHIP_RV770) *value = rdev->config.rv770.backend_map; else if (rdev->family >= CHIP_R600) *value = rdev->config.r600.backend_map; else { return -EINVAL; } break; case RADEON_INFO_VA_START: /* this is where we report if vm is supported or not */ if (rdev->family < CHIP_CAYMAN) return -EINVAL; *value = RADEON_VA_RESERVED_SIZE; break; case RADEON_INFO_IB_VM_MAX_SIZE: /* this is where we report if vm is supported or not */ if (rdev->family < CHIP_CAYMAN) return -EINVAL; *value = RADEON_IB_VM_MAX_SIZE; break; case RADEON_INFO_MAX_PIPES: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.max_cu_per_sh; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.max_cu_per_sh; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.max_pipes_per_simd; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.max_pipes; else if (rdev->family >= CHIP_RV770) *value = rdev->config.rv770.max_pipes; else if (rdev->family >= CHIP_R600) *value = rdev->config.r600.max_pipes; else { return -EINVAL; } 
break; case RADEON_INFO_TIMESTAMP: if (rdev->family < CHIP_R600) { DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); return -EINVAL; } value = (uint32_t*)&value64; value_size = sizeof(uint64_t); value64 = radeon_get_gpu_clock_counter(rdev); break; case RADEON_INFO_MAX_SE: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.max_shader_engines; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.max_shader_engines; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.max_shader_engines; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.num_ses; else *value = 1; break; case RADEON_INFO_MAX_SH_PER_SE: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.max_sh_per_se; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.max_sh_per_se; else return -EINVAL; break; case RADEON_INFO_FASTFB_WORKING: *value = rdev->fastfb_working; break; case RADEON_INFO_RING_WORKING: if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; } switch (*value) { case RADEON_CS_RING_GFX: case RADEON_CS_RING_COMPUTE: *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready; break; case RADEON_CS_RING_DMA: *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready; *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready; break; case RADEON_CS_RING_UVD: *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; break; case RADEON_CS_RING_VCE: *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; break; default: return -EINVAL; } break; case RADEON_INFO_SI_TILE_MODE_ARRAY: if (rdev->family >= CHIP_BONAIRE) { value = rdev->config.cik.tile_mode_array; value_size = sizeof(uint32_t)*32; } else if (rdev->family >= CHIP_TAHITI) { value = rdev->config.si.tile_mode_array; value_size = sizeof(uint32_t)*32; } else { DRM_DEBUG_KMS("tile mode array is si+ only!\n"); return -EINVAL; } break; case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: if (rdev->family >= CHIP_BONAIRE) { value = 
rdev->config.cik.macrotile_mode_array; value_size = sizeof(uint32_t)*16; } else { DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n"); return -EINVAL; } break; case RADEON_INFO_SI_CP_DMA_COMPUTE: *value = 1; break; case RADEON_INFO_SI_BACKEND_ENABLED_MASK: if (rdev->family >= CHIP_BONAIRE) { *value = rdev->config.cik.backend_enable_mask; } else if (rdev->family >= CHIP_TAHITI) { *value = rdev->config.si.backend_enable_mask; } else { DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); } break; case RADEON_INFO_MAX_SCLK: if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; else *value = rdev->pm.default_sclk * 10; break; case RADEON_INFO_VCE_FW_VERSION: *value = rdev->vce.fw_version; break; case RADEON_INFO_VCE_FB_VERSION: *value = rdev->vce.fb_version; break; case RADEON_INFO_NUM_BYTES_MOVED: value = (uint32_t*)&value64; value_size = sizeof(uint64_t); value64 = atomic64_read(&rdev->num_bytes_moved); break; case RADEON_INFO_VRAM_USAGE: value = (uint32_t*)&value64; value_size = sizeof(uint64_t); value64 = atomic64_read(&rdev->vram_usage); break; case RADEON_INFO_GTT_USAGE: value = (uint32_t*)&value64; value_size = sizeof(uint64_t); value64 = atomic64_read(&rdev->gtt_usage); break; case RADEON_INFO_ACTIVE_CU_COUNT: if (rdev->family >= CHIP_BONAIRE) *value = rdev->config.cik.active_cus; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.active_cus; else if (rdev->family >= CHIP_CAYMAN) *value = rdev->config.cayman.active_simds; else if (rdev->family >= CHIP_CEDAR) *value = rdev->config.evergreen.active_simds; else if (rdev->family >= CHIP_RV770) *value = rdev->config.rv770.active_simds; else if (rdev->family >= CHIP_R600) *value = rdev->config.r600.active_simds; else *value = 1; break; case RADEON_INFO_CURRENT_GPU_TEMP: /* get temperature in millidegrees C */ if (rdev->asic->pm.get_temperature) *value = radeon_get_temperature(rdev); else *value = 0; break; case 
RADEON_INFO_CURRENT_GPU_SCLK: /* get sclk in Mhz */ if (rdev->pm.dpm_enabled) *value = radeon_dpm_get_current_sclk(rdev) / 100; else *value = rdev->pm.current_sclk / 100; break; case RADEON_INFO_CURRENT_GPU_MCLK: /* get mclk in Mhz */ if (rdev->pm.dpm_enabled) *value = radeon_dpm_get_current_mclk(rdev) / 100; else *value = rdev->pm.current_mclk / 100; break; case RADEON_INFO_READ_REG: if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; } if (radeon_get_allowed_info_register(rdev, *value, value)) return -EINVAL; break; case RADEON_INFO_VA_UNMAP_WORKING: *value = true; break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; } if (copy_to_user(value_ptr, (char*)value, value_size)) { DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); return -EFAULT; } return 0; } /* * Outdated mess for old drm with Xorg being in charge (void function now). */ /** * radeon_driver_firstopen_kms - drm callback for last close * * @dev: drm dev pointer * * Switch vga switcheroo state after last close (all asics). */ void radeon_driver_lastclose_kms(struct drm_device *dev) { vga_switcheroo_process_delayed_switch(); } /** * radeon_driver_open_kms - drm callback for open * * @dev: drm dev pointer * @file_priv: drm file * * On device open, init vm on cayman+ (all asics). * Returns 0 on success, error on failure. 
*/ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; int r; file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); if (r < 0) return r; /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { struct radeon_fpriv *fpriv; struct radeon_vm *vm; int r; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { return -ENOMEM; } if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); if (r) { kfree(fpriv); return r; } r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { radeon_vm_fini(rdev, vm); kfree(fpriv); return r; } /* map the ib pool buffer read only into * virtual address space */ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo); r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); if (r) { radeon_vm_fini(rdev, vm); kfree(fpriv); return r; } } file_priv->driver_priv = fpriv; } pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; } /** * radeon_driver_postclose_kms - drm callback for post close * * @dev: drm dev pointer * @file_priv: drm file * * On device post close, tear down vm on cayman+ (all asics). 
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			/* Best-effort: unmap the IB pool BO from this VM
			 * before tearing the VM down; if the reserve fails
			 * we still must free the VM. */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Revoke any exclusive hyper-z / cmask ownership held by this file
	 * and release its UVD/VCE session handles. */
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* irq.lock protects the interrupt state; the flag must be set
	 * before reprogramming the hw via radeon_irq_set(). */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position. (all asics).
 * Returns postive status flags on success, negative error on failure.
*/ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags) { struct drm_crtc *drmcrtc; struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= dev->num_crtcs) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } /* Get associated drm_crtc: */ drmcrtc = &rdev->mode_info.crtcs[crtc]->base; if (!drmcrtc) return -EINVAL; /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, vblank_time, flags, drmcrtc, &drmcrtc->hwmode); } #define KMS_INVALID_IOCTL(name) \ static int name(struct drm_device *dev, void *data, struct drm_file \ *file_priv) \ { \ DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ return -EINVAL; \ } /* * All these ioctls are invalid in kms world. */ KMS_INVALID_IOCTL(radeon_cp_init_kms) KMS_INVALID_IOCTL(radeon_cp_start_kms) KMS_INVALID_IOCTL(radeon_cp_stop_kms) KMS_INVALID_IOCTL(radeon_cp_reset_kms) KMS_INVALID_IOCTL(radeon_cp_idle_kms) KMS_INVALID_IOCTL(radeon_cp_resume_kms) KMS_INVALID_IOCTL(radeon_engine_reset_kms) KMS_INVALID_IOCTL(radeon_fullscreen_kms) KMS_INVALID_IOCTL(radeon_cp_swap_kms) KMS_INVALID_IOCTL(radeon_cp_clear_kms) KMS_INVALID_IOCTL(radeon_cp_vertex_kms) KMS_INVALID_IOCTL(radeon_cp_indices_kms) KMS_INVALID_IOCTL(radeon_cp_texture_kms) KMS_INVALID_IOCTL(radeon_cp_stipple_kms) KMS_INVALID_IOCTL(radeon_cp_indirect_kms) KMS_INVALID_IOCTL(radeon_cp_vertex2_kms) KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms) KMS_INVALID_IOCTL(radeon_cp_getparam_kms) KMS_INVALID_IOCTL(radeon_cp_flip_kms) KMS_INVALID_IOCTL(radeon_mem_alloc_kms) KMS_INVALID_IOCTL(radeon_mem_free_kms) KMS_INVALID_IOCTL(radeon_mem_init_heap_kms) KMS_INVALID_IOCTL(radeon_irq_emit_kms) KMS_INVALID_IOCTL(radeon_irq_wait_kms) KMS_INVALID_IOCTL(radeon_cp_setparam_kms) KMS_INVALID_IOCTL(radeon_surface_alloc_kms) KMS_INVALID_IOCTL(radeon_surface_free_kms) const struct drm_ioctl_desc radeon_ioctls_kms[] = { 
DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), /* KMS */ 
DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
gpl-2.0
mdeejay/android_kernel_p880
fs/cifs/dir.c
149
19995
/* * fs/cifs/dir.c * * vfs operations that deal with dentries * * Copyright (C) International Business Machines Corp., 2002,2009 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/file.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" static void renew_parental_timestamps(struct dentry *direntry) { /* BB check if there is a way to get the kernel to do this or if we really need this */ do { direntry->d_time = jiffies; direntry = direntry->d_parent; } while (!IS_ROOT(direntry)); } /* Note: caller must free return buffer */ char * build_path_from_dentry(struct dentry *direntry) { struct dentry *temp; int namelen; int pplen; int dfsplen; char *full_path; char dirsep; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); if (direntry == NULL) return NULL; /* not much we can do if dentry is freed and we need to reopen the file after it was closed implicitly when the server crashed */ dirsep = CIFS_DIR_SEP(cifs_sb); pplen = cifs_sb->prepathlen; if (tcon->Flags & SMB_SHARE_IS_IN_DFS) 
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); else dfsplen = 0; cifs_bp_rename_retry: namelen = pplen + dfsplen; for (temp = direntry; !IS_ROOT(temp);) { namelen += (1 + temp->d_name.len); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); return NULL; } } full_path = kmalloc(namelen+1, GFP_KERNEL); if (full_path == NULL) return full_path; full_path[namelen] = 0; /* trailing null */ for (temp = direntry; !IS_ROOT(temp);) { namelen -= 1 + temp->d_name.len; if (namelen < 0) { break; } else { full_path[namelen] = dirsep; strncpy(full_path + namelen + 1, temp->d_name.name, temp->d_name.len); cFYI(0, "name: %s", full_path + namelen); } temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); kfree(full_path); return NULL; } } if (namelen != pplen + dfsplen) { cERROR(1, "did not end path lookup where expected namelen is %d", namelen); /* presumably this is only possible if racing with a rename of one of the parent directories (we can not lock the dentries above us to prevent this, but retrying should be harmless) */ kfree(full_path); goto cifs_bp_rename_retry; } /* DIR_SEP already set for byte 0 / vs \ but not for subsequent slashes in prepath which currently must be entered the right way - not sure if there is an alternative since the '\' is a valid posix character so we can not switch those safely to '/' if any are found in the middle of the prepath */ /* BB test paths to Windows with '/' in the midst of prepath */ if (dfsplen) { strncpy(full_path, tcon->treeName, dfsplen); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < dfsplen; i++) { if (full_path[i] == '\\') full_path[i] = '/'; } } } strncpy(full_path + dfsplen, CIFS_SB(direntry->d_sb)->prepath, pplen); return full_path; } /* Inode operations in similar order to how they appear in Linux file fs.h */ int cifs_create(struct inode *inode, struct dentry *direntry, int mode, struct nameidata *nd) { int rc = -ENOENT; int xid; int create_options = 
CREATE_NOT_DIR; __u32 oplock = 0; int oflags; /* * BB below access is probably too much for mknod to request * but we have to do query and setpathinfo so requesting * less could fail (unless we want to request getatr and setatr * permissions (only). At least for POSIX we do not have to * request so much. */ int desiredAccess = GENERIC_READ | GENERIC_WRITE; __u16 fileHandle; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifsTconInfo *tcon; char *full_path = NULL; FILE_ALL_INFO *buf = NULL; struct inode *newinode = NULL; int disposition = FILE_OVERWRITE_IF; xid = GetXid(); cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { FreeXid(xid); return PTR_ERR(tlink); } tcon = tlink_tcon(tlink); if (oplockEnabled) oplock = REQ_OPLOCK; if (nd && (nd->flags & LOOKUP_OPEN)) oflags = nd->intent.open.file->f_flags; else oflags = O_RDONLY | O_CREAT; full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto cifs_create_out; } if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); /* EIO could indicate that (posix open) operation is not supported, despite what server claimed in capability negotiation. 
EREMOTE indicates DFS junction, which is not handled in posix open */ if (rc == 0) { if (newinode == NULL) /* query inode info */ goto cifs_create_get_file_info; else /* success, no need to query */ goto cifs_create_set_dentry; } else if ((rc != -EIO) && (rc != -EREMOTE) && (rc != -EOPNOTSUPP) && (rc != -EINVAL)) goto cifs_create_out; /* else fallthrough to retry, using older open call, this is case where server does not support this SMB level, and falsely claims capability (also get here for DFS case which should be rare for path not covered on files) */ } if (nd && (nd->flags & LOOKUP_OPEN)) { /* if the file is going to stay open, then we need to set the desired access properly */ desiredAccess = 0; if (OPEN_FMODE(oflags) & FMODE_READ) desiredAccess |= GENERIC_READ; /* is this too little? */ if (OPEN_FMODE(oflags) & FMODE_WRITE) desiredAccess |= GENERIC_WRITE; if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) disposition = FILE_CREATE; else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) disposition = FILE_OVERWRITE_IF; else if ((oflags & O_CREAT) == O_CREAT) disposition = FILE_OPEN_IF; else cFYI(1, "Create flag not set in create function"); } /* BB add processing to set equivalent of mode - e.g. 
via CreateX with ACLs */ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; goto cifs_create_out; } /* * if we're not using unix extensions, see if we need to set * ATTR_READONLY on the create call */ if (!tcon->unix_ext && (mode & S_IWUGO) == 0) create_options |= CREATE_OPTION_READONLY; if (tcon->ses->capabilities & CAP_NT_SMBS) rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); else rc = -EIO; /* no NT SMB support fall into legacy open below */ if (rc == -EIO) { /* old server, retry the open legacy style */ rc = SMBLegacyOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } if (rc) { cFYI(1, "cifs_create returned 0x%x", rc); goto cifs_create_out; } /* If Open reported that we actually created a file then we now have to set the mode if possible */ if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); if (inode->i_mode & S_ISGID) args.gid = (__u64) inode->i_gid; else args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle, current->tgid); } else { /* BB implement mode setting via Windows security descriptors e.g. 
*/ /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/ /* Could set r/o dos attribute if mode & 0222 == 0 */ } cifs_create_get_file_info: /* server might mask mode so we have to query for it */ if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, &fileHandle); if (newinode) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) newinode->i_mode = mode; if ((oplock & CIFS_CREATE_ACTION) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) { newinode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) newinode->i_gid = inode->i_gid; else newinode->i_gid = current_fsgid(); } } } cifs_create_set_dentry: if (rc == 0) d_instantiate(direntry, newinode); else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); if (newinode && nd && (nd->flags & LOOKUP_OPEN)) { struct cifsFileInfo *pfile_info; struct file *filp; filp = lookup_instantiate_filp(nd, direntry, generic_file_open); if (IS_ERR(filp)) { rc = PTR_ERR(filp); CIFSSMBClose(xid, tcon, fileHandle); goto cifs_create_out; } pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); if (pfile_info == NULL) { fput(filp); CIFSSMBClose(xid, tcon, fileHandle); rc = -ENOMEM; } } else { CIFSSMBClose(xid, tcon, fileHandle); } cifs_create_out: kfree(buf); kfree(full_path); cifs_put_tlink(tlink); FreeXid(xid); return rc; } int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, dev_t device_number) { int rc = -EPERM; int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifsTconInfo *pTcon; char *full_path = NULL; struct inode *newinode = NULL; int oplock = 0; u16 fileHandle; FILE_ALL_INFO *buf = NULL; unsigned int bytes_written; struct win_dev *pdev; if (!old_valid_dev(device_number)) return -EINVAL; cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = GetXid(); full_path = 
build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto mknod_out; } if (pTcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = mode & ~current_umask(), .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = device_number, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) goto mknod_out; rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); if (rc == 0) d_instantiate(direntry, newinode); goto mknod_out; } if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) goto mknod_out; cFYI(1, "sfu compat create special file"); buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { kfree(full_path); rc = -ENOMEM; FreeXid(xid); return rc; } /* FIXME: would WRITE_OWNER | WRITE_DAC be better? 
*/ rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE, GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) goto mknod_out; /* BB Do not bother to decode buf since no local inode yet to put * timestamps in, but we can reuse it safely */ pdev = (struct win_dev *)buf; if (S_ISCHR(mode)) { memcpy(pdev->type, "IntxCHR", 8); pdev->major = cpu_to_le64(MAJOR(device_number)); pdev->minor = cpu_to_le64(MINOR(device_number)); rc = CIFSSMBWrite(xid, pTcon, fileHandle, sizeof(struct win_dev), 0, &bytes_written, (char *)pdev, NULL, 0); } else if (S_ISBLK(mode)) { memcpy(pdev->type, "IntxBLK", 8); pdev->major = cpu_to_le64(MAJOR(device_number)); pdev->minor = cpu_to_le64(MINOR(device_number)); rc = CIFSSMBWrite(xid, pTcon, fileHandle, sizeof(struct win_dev), 0, &bytes_written, (char *)pdev, NULL, 0); } /* else if (S_ISFIFO) */ CIFSSMBClose(xid, pTcon, fileHandle); d_drop(direntry); /* FIXME: add code here to set EAs */ mknod_out: kfree(full_path); kfree(buf); FreeXid(xid); cifs_put_tlink(tlink); return rc; } struct dentry * cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, struct nameidata *nd) { int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ __u32 oplock = 0; __u16 fileHandle = 0; bool posix_open = false; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifsTconInfo *pTcon; struct cifsFileInfo *cfile; struct inode *newInode = NULL; char *full_path = NULL; struct file *filp; xid = GetXid(); cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p", parent_dir_inode, direntry->d_name.name, direntry); /* check whether path exists */ cifs_sb = CIFS_SB(parent_dir_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { FreeXid(xid); return (struct dentry *)tlink; } pTcon = tlink_tcon(tlink); /* * Don't allow the separator character in a path component. 
* The VFS will not allow "/", but "\" is allowed by posix. */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { int i; for (i = 0; i < direntry->d_name.len; i++) if (direntry->d_name.name[i] == '\\') { cFYI(1, "Invalid file name"); rc = -EINVAL; goto lookup_out; } } /* * O_EXCL: optimize away the lookup, but don't hash the dentry. Let * the VFS handle the create. */ if (nd && (nd->flags & LOOKUP_EXCL)) { d_instantiate(direntry, NULL); rc = 0; goto lookup_out; } /* can not grab the rename sem here since it would deadlock in the cases (beginning of sys_rename itself) in which we already have the sb rename sem */ full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto lookup_out; } if (direntry->d_inode != NULL) { cFYI(1, "non-NULL inode in lookup"); } else { cFYI(1, "NULL inode in lookup"); } cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode); /* Posix open is only called (at lookup time) for file create now. * For opens (rather than creates), because we do not know if it * is a file or directory yet, and current Samba no longer allows * us to do posix open on dirs, we could end up wasting an open call * on what turns out to be a dir. For file opens, we wait to call posix * open till cifs_open. It could be added here (lookup) in the future * but the performance tradeoff of the extra network request when EISDIR * or EACCES is returned would have to be weighed against the 50% * reduction in network traffic in the other paths. 
*/ if (pTcon->unix_ext) { if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && (nd->intent.open.file->f_flags & O_CREAT)) { rc = cifs_posix_open(full_path, &newInode, parent_dir_inode->i_sb, nd->intent.open.create_mode, nd->intent.open.file->f_flags, &oplock, &fileHandle, xid); /* * The check below works around a bug in POSIX * open in samba versions 3.3.1 and earlier where * open could incorrectly fail with invalid parameter. * If either that or op not supported returned, follow * the normal lookup. */ if ((rc == 0) || (rc == -ENOENT)) posix_open = true; else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP)) pTcon->broken_posix_open = true; } if (!posix_open) rc = cifs_get_inode_info_unix(&newInode, full_path, parent_dir_inode->i_sb, xid); } else rc = cifs_get_inode_info(&newInode, full_path, NULL, parent_dir_inode->i_sb, xid, NULL); if ((rc == 0) && (newInode != NULL)) { d_add(direntry, newInode); if (posix_open) { filp = lookup_instantiate_filp(nd, direntry, generic_file_open); if (IS_ERR(filp)) { rc = PTR_ERR(filp); CIFSSMBClose(xid, pTcon, fileHandle); goto lookup_out; } cfile = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); if (cfile == NULL) { fput(filp); CIFSSMBClose(xid, pTcon, fileHandle); rc = -ENOMEM; goto lookup_out; } } /* since paths are not looked up by component - the parent directories are presumed to be good here */ renew_parental_timestamps(direntry); } else if (rc == -ENOENT) { rc = 0; direntry->d_time = jiffies; d_add(direntry, NULL); /* if it was once a directory (but how can we tell?) 
we could do shrink_dcache_parent(direntry); */ } else if (rc != -EACCES) { cERROR(1, "Unexpected lookup error %d", rc); /* We special case check for Access Denied - since that is a common return code */ } lookup_out: kfree(full_path); cifs_put_tlink(tlink); FreeXid(xid); return ERR_PTR(rc); } static int cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) return -ECHILD; if (direntry->d_inode) { if (cifs_revalidate_dentry(direntry)) return 0; else return 1; } /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!nd) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. */ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; } if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) return 0; return 1; } /* static int cifs_d_delete(struct dentry *direntry) { int rc = 0; cFYI(1, "In cifs d_delete, name = %s", direntry->d_name.name); return rc; } */ const struct dentry_operations cifs_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_automount = cifs_dfs_d_automount, /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *q) { struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls; unsigned long hash; int i; hash = init_name_hash(); for (i = 0; i < q->len; i++) hash = partial_name_hash(nls_tolower(codepage, q->name[i]), hash); q->hash = end_name_hash(hash); return 0; } static int cifs_ci_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls; if ((name->len == len) && 
(nls_strnicmp(codepage, name->name, str, len) == 0)) return 0; return 1; } const struct dentry_operations cifs_ci_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_hash = cifs_ci_hash, .d_compare = cifs_ci_compare, .d_automount = cifs_dfs_d_automount, };
gpl-2.0
sparkma/kernel
drivers/mtd/nand/diskonchip.c
149
50660
/* * drivers/mtd/nand/diskonchip.c * * (C) 2003 Red Hat, Inc. * (C) 2004 Dan Brown <dan_brown@ieee.org> * (C) 2004 Kalev Lember <kalev@smartlink.ee> * * Author: David Woodhouse <dwmw2@infradead.org> * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org> * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee> * * Error correction code lifted from the old docecc code * Author: Fabrice Bellard (fabrice.bellard@netgem.com) * Copyright (C) 2000 Netgem S.A. * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de> * * Interface to generic NAND code for M-Systems DiskOnChip devices */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/rslib.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/doc2000.h> #include <linux/mtd/partitions.h> #include <linux/mtd/inftl.h> /* Where to look for the devices? */ #ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS #define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0 #endif static unsigned long __initdata doc_locations[] = { #if defined (__alpha__) || defined(__i386__) || defined(__x86_64__) #ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000, 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000, 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000, 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000, #else /* CONFIG_MTD_DOCPROBE_HIGH */ 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000, 0xe0000, 0xe2000, 0xe4000, 0xe6000, 0xe8000, 0xea000, 0xec000, 0xee000, #endif /* CONFIG_MTD_DOCPROBE_HIGH */ #else #warning Unknown architecture for DiskOnChip. 
No default probe locations defined #endif 0xffffffff }; static struct mtd_info *doclist = NULL; struct doc_priv { void __iomem *virtadr; unsigned long physadr; u_char ChipID; u_char CDSNControl; int chips_per_floor; /* The number of chips detected on each floor */ int curfloor; int curchip; int mh0_page; int mh1_page; struct mtd_info *nextdoc; }; /* This is the syndrome computed by the HW ecc generator upon reading an empty page, one with all 0xff for data and stored ecc code. */ static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a }; /* This is the ecc value computed by the HW ecc generator upon writing an empty page, one with all 0xff for data. */ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; #define INFTL_BBT_RESERVED_BLOCKS 4 #define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32) #define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) #define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask); static void doc200x_select_chip(struct mtd_info *mtd, int chip); static int debug = 0; module_param(debug, int, 0); static int try_dword = 1; module_param(try_dword, int, 0); static int no_ecc_failures = 0; module_param(no_ecc_failures, int, 0); static int no_autopart = 0; module_param(no_autopart, int, 0); static int show_firmware_partition = 0; module_param(show_firmware_partition, int, 0); #ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE static int inftl_bbt_write = 1; #else static int inftl_bbt_write = 0; #endif module_param(inftl_bbt_write, int, 0); static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS; module_param(doc_config_location, ulong, 0); MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); /* Sector size for HW ECC */ #define SECTOR_SIZE 512 /* The sector bytes are packed into 
NB_DATA 10 bit words */ #define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10) /* Number of roots */ #define NROOTS 4 /* First consective root */ #define FCR 510 /* Number of symbols */ #define NN 1023 /* the Reed Solomon control structure */ static struct rs_control *rs_decoder; /* * The HW decoder in the DoC ASIC's provides us a error syndrome, * which we must convert to a standard syndrom usable by the generic * Reed-Solomon library code. * * Fabrice Bellard figured this out in the old docecc code. I added * some comments, improved a minor bit and converted it to make use * of the generic Reed-Solomon library. tglx */ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) { int i, j, nerr, errpos[8]; uint8_t parity; uint16_t ds[4], s[5], tmp, errval[8], syn[4]; memset(syn, 0, sizeof(syn)); /* Convert the ecc bytes into words */ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8); ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6); ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4); ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); parity = ecc[1]; /* Initialize the syndrom buffer */ for (i = 0; i < NROOTS; i++) s[i] = ds[0]; /* * Evaluate * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0] * where x = alpha^(FCR + i) */ for (j = 1; j < NROOTS; j++) { if (ds[j] == 0) continue; tmp = rs->index_of[ds[j]]; for (i = 0; i < NROOTS; i++) s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)]; } /* Calc syn[i] = s[i] / alpha^(v + i) */ for (i = 0; i < NROOTS; i++) { if (s[i]) syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i)); } /* Call the decoder library */ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval); /* Incorrectable errors ? */ if (nerr < 0) return nerr; /* * Correct the errors. The bitpositions are a bit of magic, * but they are given by the design of the de/encoder circuit * in the DoC ASIC's. 
*/ for (i = 0; i < nerr; i++) { int index, bitpos, pos = 1015 - errpos[i]; uint8_t val; if (pos >= NB_DATA && pos < 1019) continue; if (pos < NB_DATA) { /* extract bit position (MSB first) */ pos = 10 * (NB_DATA - 1 - pos) - 6; /* now correct the following 10 bits. At most two bytes can be modified since pos is even */ index = (pos >> 3) ^ 1; bitpos = pos & 7; if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) { val = (uint8_t) (errval[i] >> (2 + bitpos)); parity ^= val; if (index < SECTOR_SIZE) data[index] ^= val; } index = ((pos >> 3) + 1) ^ 1; bitpos = (bitpos + 10) & 7; if (bitpos == 0) bitpos = 8; if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) { val = (uint8_t) (errval[i] << (8 - bitpos)); parity ^= val; if (index < SECTOR_SIZE) data[index] ^= val; } } } /* If the parity is wrong, no rescue possible */ return parity ? -EBADMSG : nerr; } static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) { volatile char dummy; int i; for (i = 0; i < cycles; i++) { if (DoC_is_Millennium(doc)) dummy = ReadDOC(doc->virtadr, NOP); else if (DoC_is_MillenniumPlus(doc)) dummy = ReadDOC(doc->virtadr, Mplus_NOP); else dummy = ReadDOC(doc->virtadr, DOCStatus); } } #define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1) /* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ static int _DoC_WaitReady(struct doc_priv *doc) { void __iomem *docptr = doc->virtadr; unsigned long timeo = jiffies + (HZ * 10); if (debug) printk("_DoC_WaitReady...\n"); /* Out-of-line routine to wait for chip response */ if (DoC_is_MillenniumPlus(doc)) { while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) { if (time_after(jiffies, timeo)) { printk("_DoC_WaitReady timed out.\n"); return -EIO; } udelay(1); cond_resched(); } } else { while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { if (time_after(jiffies, timeo)) { printk("_DoC_WaitReady timed out.\n"); return -EIO; } udelay(1); 
cond_resched(); } } return 0; } static inline int DoC_WaitReady(struct doc_priv *doc) { void __iomem *docptr = doc->virtadr; int ret = 0; if (DoC_is_MillenniumPlus(doc)) { DoC_Delay(doc, 4); if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) /* Call the out-of-line routine to wait */ ret = _DoC_WaitReady(doc); } else { DoC_Delay(doc, 4); if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) /* Call the out-of-line routine to wait */ ret = _DoC_WaitReady(doc); DoC_Delay(doc, 2); } if (debug) printk("DoC_WaitReady OK\n"); return ret; } static void doc2000_write_byte(struct mtd_info *mtd, u_char datum) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; if (debug) printk("write_byte %02x\n", datum); WriteDOC(datum, docptr, CDSNSlowIO); WriteDOC(datum, docptr, 2k_CDSN_IO); } static u_char doc2000_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; u_char ret; ReadDOC(docptr, CDSNSlowIO); DoC_Delay(doc, 2); ret = ReadDOC(docptr, 2k_CDSN_IO); if (debug) printk("read_byte returns %02x\n", ret); return ret; } static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("writebuf of %d bytes: ", len); for (i = 0; i < len; i++) { WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i); if (debug && i < 16) printk("%02x ", buf[i]); } if (debug) printk("\n"); } static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("readbuf of %d bytes: ", len); for (i = 0; i < len; i++) { buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); } } static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len) { struct 
nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("readbuf_dword of %d bytes: ", len); if (unlikely((((unsigned long)buf) | len) & 3)) { for (i = 0; i < len; i++) { *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); } } else { for (i = 0; i < len; i += 4) { *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); } } } static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; for (i = 0; i < len; i++) if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO)) return -EFAULT; return 0; } static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; uint16_t ret; doc200x_select_chip(mtd, nr); doc200x_hwcontrol(mtd, NAND_CMD_READID, NAND_CTRL_CLE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); /* We can't use dev_ready here, but at least we wait for the * command to complete */ udelay(50); ret = this->read_byte(mtd) << 8; ret |= this->read_byte(mtd); if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) { /* First chip probe. 
See if we get same results by 32-bit access */ union { uint32_t dword; uint8_t byte[4]; } ident; void __iomem *docptr = doc->virtadr; doc200x_hwcontrol(mtd, NAND_CMD_READID, NAND_CTRL_CLE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); udelay(50); ident.dword = readl(docptr + DoC_2k_CDSN_IO); if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n"); this->read_buf = &doc2000_readbuf_dword; } } return ret; } static void __init doc2000_count_chips(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; uint16_t mfrid; int i; /* Max 4 chips per floor on DiskOnChip 2000 */ doc->chips_per_floor = 4; /* Find out what the first chip is */ mfrid = doc200x_ident_chip(mtd, 0); /* Find how many chips in each floor. */ for (i = 1; i < 4; i++) { if (doc200x_ident_chip(mtd, i) != mfrid) break; } doc->chips_per_floor = i; printk(KERN_DEBUG "Detected %d chips per floor.\n", i); } static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this) { struct doc_priv *doc = this->priv; int status; DoC_WaitReady(doc); this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); DoC_WaitReady(doc); status = (int)this->read_byte(mtd); return status; } static void doc2001_write_byte(struct mtd_info *mtd, u_char datum) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; WriteDOC(datum, docptr, CDSNSlowIO); WriteDOC(datum, docptr, Mil_CDSN_IO); WriteDOC(datum, docptr, WritePipeTerm); } static u_char doc2001_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; //ReadDOC(docptr, CDSNSlowIO); /* 11.4.5 -- delay twice to allow extended length cycle */ DoC_Delay(doc, 2); ReadDOC(docptr, ReadPipeInit); //return ReadDOC(docptr, Mil_CDSN_IO); return ReadDOC(docptr, 
LastDataRead); } static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; for (i = 0; i < len; i++) WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); /* Terminate write pipeline */ WriteDOC(0x00, docptr, WritePipeTerm); } static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; /* Start read pipeline */ ReadDOC(docptr, ReadPipeInit); for (i = 0; i < len - 1; i++) buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff)); /* Terminate read pipeline */ buf[i] = ReadDOC(docptr, LastDataRead); } static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; /* Start read pipeline */ ReadDOC(docptr, ReadPipeInit); for (i = 0; i < len - 1; i++) if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { ReadDOC(docptr, LastDataRead); return i; } if (buf[i] != ReadDOC(docptr, LastDataRead)) return i; return 0; } static u_char doc2001plus_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; u_char ret; ReadDOC(docptr, Mplus_ReadPipeInit); ReadDOC(docptr, Mplus_ReadPipeInit); ret = ReadDOC(docptr, Mplus_LastDataRead); if (debug) printk("read_byte returns %02x\n", ret); return ret; } static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("writebuf of %d bytes: ", len); for (i = 0; i < len; i++) { WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); if (debug && i < 16) printk("%02x ", buf[i]); } if (debug) printk("\n"); } static void 
doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("readbuf of %d bytes: ", len); /* Start read pipeline */ ReadDOC(docptr, Mplus_ReadPipeInit); ReadDOC(docptr, Mplus_ReadPipeInit); for (i = 0; i < len - 2; i++) { buf[i] = ReadDOC(docptr, Mil_CDSN_IO); if (debug && i < 16) printk("%02x ", buf[i]); } /* Terminate read pipeline */ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead); if (debug && i < 16) printk("%02x ", buf[len - 2]); buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead); if (debug && i < 16) printk("%02x ", buf[len - 1]); if (debug) printk("\n"); } static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("verifybuf of %d bytes: ", len); /* Start read pipeline */ ReadDOC(docptr, Mplus_ReadPipeInit); ReadDOC(docptr, Mplus_ReadPipeInit); for (i = 0; i < len - 2; i++) if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { ReadDOC(docptr, Mplus_LastDataRead); ReadDOC(docptr, Mplus_LastDataRead); return i; } if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead)) return len - 2; if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead)) return len - 1; return 0; } static void doc2001plus_select_chip(struct mtd_info *mtd, int chip) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int floor = 0; if (debug) printk("select chip (%d)\n", chip); if (chip == -1) { /* Disable flash internally */ WriteDOC(0, docptr, Mplus_FlashSelect); return; } floor = chip / doc->chips_per_floor; chip -= (floor * doc->chips_per_floor); /* Assert ChipEnable and deassert WriteProtect */ WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect); this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); doc->curchip = chip; doc->curfloor = floor; } 
/* Select a chip on DoC 2000/Millennium: map the linear chip number to a
 * (floor, chip-on-floor) pair and program the floor/device registers.
 * The "11.4.x" comments refer to sections of the DiskOnChip datasheet. */
static void doc200x_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int floor = 0;

	if (debug)
		printk("select chip (%d)\n", chip);

	if (chip == -1)
		return;

	floor = chip / doc->chips_per_floor;
	chip -= (floor * doc->chips_per_floor);

	/* 11.4.4 -- deassert CE before changing chip */
	doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);

	WriteDOC(floor, docptr, FloorSelect);
	WriteDOC(chip, docptr, CDSNDeviceSelect);

	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	doc->curchip = chip;
	doc->curfloor = floor;
}

/* The CE/CLE/ALE bits we are allowed to update in CDSNControl. */
#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)

/* NAND cmd_ctrl hook: update the cached CDSNControl latch bits when
 * NAND_CTRL_CHANGE is set, then (optionally) clock out a command byte. */
static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	if (ctrl & NAND_CTRL_CHANGE) {
		doc->CDSNControl &= ~CDSN_CTRL_MSK;
		doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
		if (debug)
			printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
		/* 11.4.3 -- 4 NOPs after CSDNControl write */
		DoC_Delay(doc, 4);
	}
	if (cmd != NAND_CMD_NONE) {
		if (DoC_is_2000(doc))
			doc2000_write_byte(mtd, cmd);
		else
			doc2001_write_byte(mtd, cmd);
	}
}

/* Full cmdfunc replacement for the Millennium Plus: issues a NAND command
 * plus address cycles through the Mplus registers, terminating the write
 * pipeline around each phase, and performs the per-command busy handling. */
static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	/*
	 * Must terminate write pipeline before sending any commands
	 * to the device.
	 */
	if (command == NAND_CMD_PAGEPROG) {
		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
	}

	/*
	 * Write out the command to the device.
	 */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;

		/* SEQIN on small-page NAND needs a pointer command first to
		 * select the 256-byte half-page or OOB region. */
		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		WriteDOC(readcmd, docptr, Mplus_FlashCmd);
	}
	WriteDOC(command, docptr, Mplus_FlashCmd);
	WriteDOC(0, docptr, Mplus_WritePipeTerm);
	WriteDOC(0, docptr, Mplus_WritePipeTerm);

	if (column != -1 || page_addr != -1) {
		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (this->options & NAND_BUSWIDTH_16)
				column >>= 1;
			WriteDOC(column, docptr, Mplus_FlashAddress);
		}
		if (page_addr != -1) {
			WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
			WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
			/* One more address cycle for higher density devices */
			if (this->chipsize & 0x0c000000) {
				WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
				printk("high density\n");
			}
		}
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		/* deassert ALE */
		if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || command == NAND_CMD_READOOB || command == NAND_CMD_READID)
			WriteDOC(0, docptr, Mplus_FlashControl);
	}

	/*
	 * program and erase have their own busy handlers
	 * status and sequential in needs no delay
	 */
	switch (command) {

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
		return;

	case NAND_CMD_RESET:
		if (this->dev_ready)
			break;
		/* No busy pin: poll the status register for bit 0x40 (ready) */
		udelay(this->chip_delay);
		WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		while (!(this->read_byte(mtd) & 0x40)) ;
		return;

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!this->dev_ready) {
			udelay(this->chip_delay);
			return;
		}
	}

	/* Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine. */
	ndelay(100);
	/* wait until command is processed */
	while (!this->dev_ready(mtd)) ;
}

/* dev_ready hook: sample the flash ready/busy bit, honoring the datasheet
 * NOP requirements before (and, for non-Plus, after) the check. */
static int doc200x_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	if (DoC_is_MillenniumPlus(doc)) {
		/* 11.4.2 -- must NOP four times before checking FR/B# */
		DoC_Delay(doc, 4);
		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
			if (debug)
				printk("not ready\n");
			return 0;
		}
		if (debug)
			printk("was ready\n");
		return 1;
	} else {
		/* 11.4.2 -- must NOP four times before checking FR/B# */
		DoC_Delay(doc, 4);
		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
			if (debug)
				printk("not ready\n");
			return 0;
		}
		/* 11.4.2 -- Must NOP twice if it's ready */
		DoC_Delay(doc, 2);
		if (debug)
			printk("was ready\n");
		return 1;
	}
}

static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	/* This is our last resort if we couldn't find or create a BBT.  Just
	   pretend all blocks are good.
	 */
	return 0;
}

/* Prime the DoC 2000/Millennium hardware ECC engine for a read or write. */
static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	/* Prime the ECC engine */
	switch (mode) {
	case NAND_ECC_READ:
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
		break;
	case NAND_ECC_WRITE:
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
		break;
	}
}

/* Same as above, but for the Millennium Plus register layout. */
static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	/* Prime the ECC engine */
	switch (mode) {
	case NAND_ECC_READ:
		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
		break;
	case NAND_ECC_WRITE:
		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
		break;
	}
}

/* This code is only called on write: flush the ECC pipeline (the flush
 * sequence differs per ASIC generation), read back the 6 syndrome bytes
 * into ecc_code, and disable the engine.  Always returns 0. */
static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	int emptymatch = 1;

	/* flush the pipeline */
	if (DoC_is_2000(doc)) {
		WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
	} else if (DoC_is_MillenniumPlus(doc)) {
		WriteDOC(0, docptr, Mplus_NOP);
		WriteDOC(0, docptr, Mplus_NOP);
		WriteDOC(0, docptr, Mplus_NOP);
	} else {
		WriteDOC(0, docptr, NOP);
		WriteDOC(0, docptr, NOP);
		WriteDOC(0, docptr, NOP);
	}

	for (i = 0; i < 6; i++) {
		if (DoC_is_MillenniumPlus(doc))
			ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
		else
			ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
		if (ecc_code[i] != empty_write_ecc[i])
			emptymatch = 0;
	}
	if (DoC_is_MillenniumPlus(doc))
		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
	else
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
#if 0
	/* If emptymatch=1, we might have an all-0xff data buffer.  Check. */
	if (emptymatch) {
		/* Note: this somewhat expensive test should not be triggered
		   often.  It could be optimized away by examining the data in
		   the writebuf routine, and remembering the result. */
		for (i = 0; i < 512; i++) {
			if (dat[i] == 0xff)
				continue;
			emptymatch = 0;
			break;
		}
	}
	/* If emptymatch still =1, we do have an all-0xff data buffer.
	   Return all-0xff ecc value instead of the computed one, so
	   it'll look just like a freshly-erased page. */
	if (emptymatch)
		memset(ecc_code, 0xff, 6);
#endif
	return 0;
}

/* ECC correction on read: if the hardware flags an error (bit 0x80 of the
 * flushed status), fetch the syndrome and run the Reed-Solomon decoder —
 * unless the page looks freshly erased (all-0xff data + ecc), in which
 * case the "error" is suppressed.  Returns number of corrected errors,
 * 0, or a negative error (-EBADMSG unless no_ecc_failures is set). */
static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *isnull)
{
	int i, ret = 0;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	uint8_t calc_ecc[6];
	volatile u_char dummy;
	int emptymatch = 1;

	/* flush the pipeline */
	if (DoC_is_2000(doc)) {
		dummy = ReadDOC(docptr, 2k_ECCStatus);
		dummy = ReadDOC(docptr, 2k_ECCStatus);
		dummy = ReadDOC(docptr, 2k_ECCStatus);
	} else if (DoC_is_MillenniumPlus(doc)) {
		dummy = ReadDOC(docptr, Mplus_ECCConf);
		dummy = ReadDOC(docptr, Mplus_ECCConf);
		dummy = ReadDOC(docptr, Mplus_ECCConf);
	} else {
		dummy = ReadDOC(docptr, ECCConf);
		dummy = ReadDOC(docptr, ECCConf);
		dummy = ReadDOC(docptr, ECCConf);
	}

	/* Error occurred ? */
	if (dummy & 0x80) {
		for (i = 0; i < 6; i++) {
			if (DoC_is_MillenniumPlus(doc))
				calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
			else
				calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
			if (calc_ecc[i] != empty_read_syndrome[i])
				emptymatch = 0;
		}
		/* If emptymatch=1, the read syndrome is consistent with an
		   all-0xff data and stored ecc block.  Check the stored ecc. */
		if (emptymatch) {
			for (i = 0; i < 6; i++) {
				if (read_ecc[i] == 0xff)
					continue;
				emptymatch = 0;
				break;
			}
		}
		/* If emptymatch still =1, check the data block. */
		if (emptymatch) {
			/* Note: this somewhat expensive test should not be
			   triggered often.  It could be optimized away by
			   examining the data in the readbuf routine, and
			   remembering the result. */
			for (i = 0; i < 512; i++) {
				if (dat[i] == 0xff)
					continue;
				emptymatch = 0;
				break;
			}
		}
		/* If emptymatch still =1, this is almost certainly a
		   freshly-erased block, in which case the ECC will not come
		   out right.  We'll suppress the error and tell the caller
		   everything's OK.  Because it is. */
		if (!emptymatch)
			ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
		if (ret > 0)
			printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
	}
	if (DoC_is_MillenniumPlus(doc))
		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
	else
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
	if (no_ecc_failures && (ret == -EBADMSG)) {
		printk(KERN_ERR "suppressing ECC failure\n");
		ret = 0;
	}
	return ret;
}

//u_char mydatabuf[528];

/* The strange out-of-order .oobfree list below is a (possibly unneeded)
 * attempt to retain compatibility.  It used to read:
 *	.oobfree = { {8, 8} }
 * Since that leaves two bytes unusable, it was changed.  But the following
 * scheme might affect existing jffs2 installs by moving the cleanmarker:
 *	.oobfree = { {6, 10} }
 * jffs2 seems to handle the above gracefully, but the current scheme seems
 * safer.  The only problem with it is that any code that parses oobfree must
 * be able to handle out-of-order segments.
 */
static struct nand_ecclayout doc200x_oobinfo = {
	.eccbytes = 6,
	.eccpos = {0, 1, 2, 3, 4, 5},
	.oobfree = {{8, 8}, {6, 2}}
};

/* Find the (I)NFTL Media Header, and optionally also the mirror media
   header.  On successful return, buf will contain a copy of the media
   header for further processing.  id is the string to scan for, and will
   presumably be either "ANAND" or "BNAND".  If findmirror=1, also look for
   the mirror media header.  The page #s of the found media headers are
   placed in mh0_page and mh1_page in the DOC private structure.
 */
static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	unsigned offs;
	int ret;
	size_t retlen;

	/* Scan one page per erase block; ECC errors are only warned about,
	 * since the header may still be identifiable. */
	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
		ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
		if (retlen != mtd->writesize)
			continue;
		if (ret) {
			printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
		}
		if (memcmp(buf, id, 6))
			continue;
		printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
		if (doc->mh0_page == -1) {
			doc->mh0_page = offs >> this->page_shift;
			if (!findmirror)
				return 1;
			continue;
		}
		doc->mh1_page = offs >> this->page_shift;
		return 2;	/* both primary and mirror header found */
	}
	if (doc->mh0_page == -1) {
		printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
		return 0;
	}
	/* Only one mediaheader was found.  We want buf to contain a
	   mediaheader on return, so we'll have to re-read the one we found. */
	offs = doc->mh0_page << this->page_shift;
	ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
	if (retlen != mtd->writesize) {
		/* Insanity.  Give up. */
		printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
		return 0;
	}
	return 1;
}

/* Parse the NFTL media header and build the MTD partition table in
 * 'parts' (up to 3 entries).  Returns the number of partitions, or 0 on
 * failure. */
static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	int ret = 0;
	u_char *buf;
	struct NFTLMediaHeader *mh;
	const unsigned psize = 1 << this->page_shift;
	int numparts = 0;
	unsigned blocks, maxblocks;
	int offs, numheaders;

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
		return 0;
	}
	if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
		goto out;
	mh = (struct NFTLMediaHeader *)buf;

	/* Header fields are stored little-endian on the medium */
	le16_to_cpus(&mh->NumEraseUnits);
	le16_to_cpus(&mh->FirstPhysicalEUN);
	le32_to_cpus(&mh->FormattedSize);

	printk(KERN_INFO " DataOrgID = %s\n"
	       " NumEraseUnits = %d\n"
	       " FirstPhysicalEUN = %d\n"
	       " FormattedSize = %d\n"
	       " UnitSizeFactor = %d\n",
	       mh->DataOrgID, mh->NumEraseUnits,
	       mh->FirstPhysicalEUN, mh->FormattedSize,
	       mh->UnitSizeFactor);

	blocks = mtd->size >> this->phys_erase_shift;
	maxblocks = min(32768U, mtd->erasesize - psize);

	if (mh->UnitSizeFactor == 0x00) {
		/* Auto-determine UnitSizeFactor.  The constraints are:
		   - There can be at most 32768 virtual blocks.
		   - There can be at most (virtual block size - page size)
		     virtual blocks (because MediaHeader+BBT must fit in 1).
		 */
		mh->UnitSizeFactor = 0xff;
		while (blocks > maxblocks) {
			blocks >>= 1;
			maxblocks = min(32768U, (maxblocks << 1) + psize);
			mh->UnitSizeFactor--;
		}
		printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
	}

	/* NOTE: The lines below modify internal variables of the NAND and MTD
	   layers; variables which have already been configured by nand_scan.
	   Unfortunately, we didn't know before this point what these values
	   should be.  Thus, this code is somewhat dependent on the exact
	   implementation of the NAND layer.
	 */
	if (mh->UnitSizeFactor != 0xff) {
		/* Grow the virtual erase block to UnitSizeFactor physical
		 * blocks, adjusting both the NAND BBT shift and the MTD
		 * erasesize to match. */
		this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
		mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
		printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
		blocks = mtd->size >> this->bbt_erase_shift;
		maxblocks = min(32768U, mtd->erasesize - psize);
	}

	if (blocks > maxblocks) {
		printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
		goto out;
	}

	/* Skip past the media headers. */
	offs = max(doc->mh0_page, doc->mh1_page);
	offs <<= this->page_shift;
	offs += mtd->erasesize;

	if (show_firmware_partition == 1) {
		parts[0].name = " DiskOnChip Firmware / Media Header partition";
		parts[0].offset = 0;
		parts[0].size = offs;
		numparts = 1;
	}

	parts[numparts].name = " DiskOnChip BDTL partition";
	parts[numparts].offset = offs;
	parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;

	offs += parts[numparts].size;
	numparts++;

	if (offs < mtd->size) {
		parts[numparts].name = " DiskOnChip Remainder partition";
		parts[numparts].offset = offs;
		parts[numparts].size = mtd->size - offs;
		numparts++;
	}

	ret = numparts;
 out:
	kfree(buf);
	return ret;
}

/* This is a stripped-down copy of the code in inftlmount.c */
/* Parse the INFTL media header ("BNAND") and build up to 5 MTD partitions.
 * Returns the partition count, or 0 on failure. */
static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	int ret = 0;
	u_char *buf;
	struct INFTLMediaHeader *mh;
	struct INFTLPartition *ip;
	int numparts = 0;
	int blocks;
	int vshift, lastvunit = 0;
	int i;
	int end = mtd->size;

	/* Reserve blocks at the end of the device for a writeable BBT */
	if (inftl_bbt_write)
		end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
		return 0;
	}

	if (!find_media_headers(mtd, buf, "BNAND", 0))
		goto out;
	/* INFTL keeps the mirror header 4KiB after the primary */
	doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
	mh = (struct INFTLMediaHeader *)buf;

	/* Header fields are stored little-endian on the medium */
	le32_to_cpus(&mh->NoOfBootImageBlocks);
	le32_to_cpus(&mh->NoOfBinaryPartitions);
	le32_to_cpus(&mh->NoOfBDTLPartitions);
	le32_to_cpus(&mh->BlockMultiplierBits);
	le32_to_cpus(&mh->FormatFlags);
	le32_to_cpus(&mh->PercentUsed);

	printk(KERN_INFO " bootRecordID = %s\n"
	       " NoOfBootImageBlocks = %d\n"
	       " NoOfBinaryPartitions = %d\n"
	       " NoOfBDTLPartitions = %d\n"
	       " BlockMultiplerBits = %d\n"
	       " FormatFlgs = %d\n"
	       " OsakVersion = %d.%d.%d.%d\n"
	       " PercentUsed = %d\n",
	       mh->bootRecordID, mh->NoOfBootImageBlocks,
	       mh->NoOfBinaryPartitions, mh->NoOfBDTLPartitions,
	       mh->BlockMultiplierBits, mh->FormatFlags,
	       ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
	       ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
	       ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
	       ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
	       mh->PercentUsed);

	vshift = this->phys_erase_shift + mh->BlockMultiplierBits;

	blocks = mtd->size >> vshift;
	if (blocks > 32768) {
		printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
		goto out;
	}

	blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
	if (inftl_bbt_write && (blocks > mtd->erasesize)) {
		printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
		goto out;
	}

	/* Scan the partitions */
	for (i = 0; (i < 4); i++) {
		ip = &(mh->Partitions[i]);
		le32_to_cpus(&ip->virtualUnits);
		le32_to_cpus(&ip->firstUnit);
		le32_to_cpus(&ip->lastUnit);
		le32_to_cpus(&ip->flags);
		le32_to_cpus(&ip->spareUnits);
		le32_to_cpus(&ip->Reserved0);

		printk(KERN_INFO " PARTITION[%d] ->\n"
		       " virtualUnits = %d\n"
		       " firstUnit = %d\n"
		       " lastUnit = %d\n"
		       " flags = 0x%x\n"
		       " spareUnits = %d\n",
		       i, ip->virtualUnits, ip->firstUnit,
		       ip->lastUnit, ip->flags, ip->spareUnits);

		if ((show_firmware_partition == 1) && (i == 0) && (ip->firstUnit > 0)) {
			parts[0].name = " DiskOnChip IPL / Media Header partition";
			parts[0].offset = 0;
			parts[0].size = mtd->erasesize * ip->firstUnit;
			numparts = 1;
		}

		if (ip->flags & INFTL_BINARY)
			parts[numparts].name = " DiskOnChip BDK partition";
		else
			parts[numparts].name = " DiskOnChip BDTL partition";
		parts[numparts].offset = ip->firstUnit << vshift;
		parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
		numparts++;
		if (ip->lastUnit > lastvunit)
			lastvunit = ip->lastUnit;
		if (ip->flags & INFTL_LAST)
			break;
	}
	lastvunit++;
	if ((lastvunit << vshift) < end) {
		parts[numparts].name = " DiskOnChip Remainder partition";
		parts[numparts].offset = lastvunit << vshift;
		parts[numparts].size = end - parts[numparts].offset;
		numparts++;
	}
	ret = numparts;
 out:
	kfree(buf);
	return ret;
}

/* scan_bbt hook for NFTL devices: locate the media headers, configure the
 * absolute-page BBT descriptors, then run the standard NAND BBT scan and
 * register the MTD device (and partitions). */
static int __init nftl_scan_bbt(struct mtd_info *mtd)
{
	int ret, numparts;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	struct mtd_partition parts[2];

	memset((char *)parts, 0, sizeof(parts));
	/* On NFTL, we have to find the media headers before we can read the
	   BBTs, since they're stored in the media header eraseblocks.
	 */
	numparts = nftl_partscan(mtd, parts);
	if (!numparts)
		return -EIO;

	/* BBT lives at an absolute page right after each media header */
	this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | NAND_BBT_VERSION;
	this->bbt_td->veroffs = 7;
	this->bbt_td->pages[0] = doc->mh0_page + 1;
	if (doc->mh1_page != -1) {
		this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | NAND_BBT_VERSION;
		this->bbt_md->veroffs = 7;
		this->bbt_md->pages[0] = doc->mh1_page + 1;
	} else {
		this->bbt_md = NULL;
	}

	/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
	   At least as nand_bbt.c is currently written. */
	if ((ret = nand_scan_bbt(mtd, NULL)))
		return ret;
	add_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
	if (!no_autopart)
		add_mtd_partitions(mtd, parts, numparts);
#endif
	return 0;
}

/* scan_bbt hook for INFTL devices: configure the BBT descriptors (layout
 * differs between Millennium Plus and older parts), run the standard BBT
 * scan, then parse partitions and register the MTD device. */
static int __init inftl_scan_bbt(struct mtd_info *mtd)
{
	int ret, numparts;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	struct mtd_partition parts[5];

	if (this->numchips > doc->chips_per_floor) {
		printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
		return -EIO;
	}

	if (DoC_is_MillenniumPlus(doc)) {
		this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
		if (inftl_bbt_write)
			this->bbt_td->options |= NAND_BBT_WRITE;
		this->bbt_td->pages[0] = 2;
		this->bbt_md = NULL;
	} else {
		this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
		if (inftl_bbt_write)
			this->bbt_td->options |= NAND_BBT_WRITE;
		this->bbt_td->offs = 8;
		this->bbt_td->len = 8;
		this->bbt_td->veroffs = 7;
		this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
		this->bbt_td->reserved_block_code = 0x01;
		this->bbt_td->pattern = "MSYS_BBT";

		this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
		if (inftl_bbt_write)
			this->bbt_md->options |= NAND_BBT_WRITE;
		this->bbt_md->offs = 8;
		this->bbt_md->len = 8;
		this->bbt_md->veroffs = 7;
		this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
		this->bbt_md->reserved_block_code = 0x01;
		this->bbt_md->pattern = "TBB_SYSM";
	}

	/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
	   At least as nand_bbt.c is currently written. */
	if ((ret = nand_scan_bbt(mtd, NULL)))
		return ret;

	memset((char *)parts, 0, sizeof(parts));
	numparts = inftl_partscan(mtd, parts);
	/* At least for now, require the INFTL Media Header.  We could probably
	   do without it for non-INFTL use, since all it gives us is
	   autopartitioning, but I want to give it more thought. */
	if (!numparts)
		return -EIO;
	add_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
	if (!no_autopart)
		add_mtd_partitions(mtd, parts, numparts);
#endif
	return 0;
}

/* Wire up the nand_chip callbacks for a DiskOnChip 2000 (NFTL).
 * Returns the maximum number of chips to probe (4 per floor). */
static inline int __init doc2000_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2000_read_byte;
	this->write_buf = doc2000_writebuf;
	this->read_buf = doc2000_readbuf;
	this->verify_buf = doc2000_verifybuf;
	this->scan_bbt = nftl_scan_bbt;

	doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
	doc2000_count_chips(mtd);
	mtd->name = "DiskOnChip 2000 (NFTL Model)";
	return (4 * doc->chips_per_floor);
}

/* Wire up the callbacks for a Millennium-family ASIC; distinguishes a
 * true Millennium from the newer INFTL-model DiskOnChip 2000 by re-reading
 * the ChipID (three dummy reads first to flush the read pipeline). */
static inline int __init doc2001_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2001_read_byte;
	this->write_buf = doc2001_writebuf;
	this->read_buf = doc2001_readbuf;
	this->verify_buf = doc2001_verifybuf;

	ReadDOC(doc->virtadr, ChipID);
	ReadDOC(doc->virtadr, ChipID);
	ReadDOC(doc->virtadr, ChipID);
	if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
		/* It's not a Millennium; it's one of the newer DiskOnChip
		   2000 units with a similar ASIC.  Treat it like a
		   Millennium, except that it can have multiple chips.
		 */
		doc2000_count_chips(mtd);
		mtd->name = "DiskOnChip 2000 (INFTL Model)";
		this->scan_bbt = inftl_scan_bbt;
		return (4 * doc->chips_per_floor);
	} else {
		/* Bog-standard Millennium */
		doc->chips_per_floor = 1;
		mtd->name = "DiskOnChip Millennium";
		this->scan_bbt = nftl_scan_bbt;
		return 1;
	}
}

/* Wire up the callbacks for a Millennium Plus: this ASIC gets its own
 * cmdfunc/select_chip instead of the generic cmd_ctrl path.  Always a
 * single chip per floor. */
static inline int __init doc2001plus_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2001plus_read_byte;
	this->write_buf = doc2001plus_writebuf;
	this->read_buf = doc2001plus_readbuf;
	this->verify_buf = doc2001plus_verifybuf;
	this->scan_bbt = inftl_scan_bbt;
	this->cmd_ctrl = NULL;
	this->select_chip = doc2001plus_select_chip;
	this->cmdfunc = doc2001plus_command;
	this->ecc.hwctl = doc2001plus_enable_hwecc;

	doc->chips_per_floor = 1;
	mtd->name = "DiskOnChip Millennium Plus";

	return 1;
}

/* Probe a candidate physical address for a DiskOnChip ASIC.  On success
 * the device is fully initialised (including nand_scan) and linked into
 * 'doclist'; on failure the DOCControl register is restored and the
 * mapping released.  Returns 0, -ENODEV, -ENOMEM or -EIO. */
static int __init doc_probe(unsigned long physadr)
{
	unsigned char ChipID;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	struct doc_priv *doc;
	void __iomem *virtadr;
	unsigned char save_control;
	unsigned char tmp, tmpb, tmpc;
	int reg, len, numchips;
	int ret = 0;

	virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
	if (!virtadr) {
		printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
		return -EIO;
	}

	/* It's not possible to cleanly detect the DiskOnChip - the
	 * bootup procedure will put the device into reset mode, and
	 * it's not possible to talk to it without actually writing
	 * to the DOCControl register. So we store the current contents
	 * of the DOCControl register's location, in case we later decide
	 * that it's not a DiskOnChip, and want to put it back how we
	 * found it.
	 */
	save_control = ReadDOC(virtadr, DOCControl);

	/* Reset the DiskOnChip ASIC */
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);

	/* Enable the DiskOnChip ASIC */
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);

	ChipID = ReadDOC(virtadr, ChipID);

	switch (ChipID) {
	case DOC_ChipID_Doc2k:
		reg = DoC_2k_ECCStatus;
		break;
	case DOC_ChipID_DocMil:
		reg = DoC_ECCConf;
		break;
	case DOC_ChipID_DocMilPlus16:
	case DOC_ChipID_DocMilPlus32:
	case 0:
		/* Possible Millennium Plus, need to do more checks */
		/* Possibly release from power down mode */
		for (tmp = 0; (tmp < 4); tmp++)
			ReadDOC(virtadr, Mplus_Power);

		/* Reset the Millennium Plus ASIC */
		tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);

		mdelay(1);
		/* Enable the Millennium Plus ASIC */
		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
		mdelay(1);

		ChipID = ReadDOC(virtadr, ChipID);

		switch (ChipID) {
		case DOC_ChipID_DocMilPlus16:
			reg = DoC_Mplus_Toggle;
			break;
		case DOC_ChipID_DocMilPlus32:
			printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
			/* fall through: treated as not found */
		default:
			ret = -ENODEV;
			goto notfound;
		}
		break;

	default:
		ret = -ENODEV;
		goto notfound;
	}
	/* Check the TOGGLE bit in the ECC register: on real hardware it
	 * alternates on successive reads (sample1 != sample2 == sample3^1). */
	tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	if ((tmp == tmpb) || (tmp != tmpc)) {
		printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
		ret = -ENODEV;
		goto notfound;
	}

	for (mtd = doclist; mtd; mtd = doc->nextdoc) {
		unsigned char oldval;
		unsigned char newval;
		nand = mtd->priv;
		doc = nand->priv;
		/* Use the alias resolution register to determine if this is
		   in fact the same DOC aliased to a new address.  If writes
		   to one chip's alias resolution register change the value on
		   the other chip, they're the same chip. */
		if (ChipID == DOC_ChipID_DocMilPlus16) {
			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
			newval = ReadDOC(virtadr, Mplus_AliasResolution);
		} else {
			oldval = ReadDOC(doc->virtadr, AliasResolution);
			newval = ReadDOC(virtadr, AliasResolution);
		}
		if (oldval != newval)
			continue;
		if (ChipID == DOC_ChipID_DocMilPlus16) {
			WriteDOC(~newval, virtadr, Mplus_AliasResolution);
			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
			WriteDOC(newval, virtadr, Mplus_AliasResolution);	// restore it
		} else {
			WriteDOC(~newval, virtadr, AliasResolution);
			oldval = ReadDOC(doc->virtadr, AliasResolution);
			WriteDOC(newval, virtadr, AliasResolution);	// restore it
		}
		newval = ~newval;
		if (oldval == newval) {
			printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
			goto notfound;
		}
	}

	printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);

	/* One allocation carries the mtd_info, nand_chip, doc_priv and the
	 * two BBT descriptors, carved up by pointer arithmetic below. */
	len = sizeof(struct mtd_info) + sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
	mtd = kzalloc(len, GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
		ret = -ENOMEM;
		goto fail;
	}

	nand = (struct nand_chip *) (mtd + 1);
	doc = (struct doc_priv *) (nand + 1);
	nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
	nand->bbt_md = nand->bbt_td + 1;

	mtd->priv = nand;
	mtd->owner = THIS_MODULE;

	nand->priv = doc;
	nand->select_chip = doc200x_select_chip;
	nand->cmd_ctrl = doc200x_hwcontrol;
	nand->dev_ready = doc200x_dev_ready;
	nand->waitfunc = doc200x_wait;
	nand->block_bad = doc200x_block_bad;
	nand->ecc.hwctl = doc200x_enable_hwecc;
	nand->ecc.calculate = doc200x_calculate_ecc;
	nand->ecc.correct = doc200x_correct_data;

	nand->ecc.layout = &doc200x_oobinfo;
	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
	nand->ecc.size = 512;
	nand->ecc.bytes = 6;
	nand->options = NAND_USE_FLASH_BBT;

	doc->physadr = physadr;
	doc->virtadr = virtadr;
	doc->ChipID = ChipID;
	doc->curfloor = -1;
	doc->curchip = -1;
	doc->mh0_page = -1;
	doc->mh1_page = -1;
	doc->nextdoc = doclist;

	if (ChipID == DOC_ChipID_Doc2k)
		numchips = doc2000_init(mtd);
	else if (ChipID == DOC_ChipID_DocMilPlus16)
		numchips = doc2001plus_init(mtd);
	else
		numchips = doc2001_init(mtd);

	if ((ret = nand_scan(mtd, numchips))) {
		/* DBB note: i believe nand_release is necessary here, as
		   buffers may have been allocated in nand_base.  Check with
		   Thomas. FIX ME! */
		/* nand_release will call del_mtd_device, but we haven't yet
		   added it.  This is handled without incident by
		   del_mtd_device, as far as I can tell. */
		nand_release(mtd);
		kfree(mtd);
		goto fail;
	}

	/* Success! */
	doclist = mtd;
	return 0;

 notfound:
	/* Put back the contents of the DOCControl register, in case it's not
	   actually a DiskOnChip.  */
	WriteDOC(save_control, virtadr, DOCControl);
 fail:
	iounmap(virtadr);
	return ret;
}

/* Tear down every probed DiskOnChip: release the NAND layer, unmap the
 * registers and free the combined allocation made in doc_probe(). */
static void release_nanddoc(void)
{
	struct mtd_info *mtd, *nextmtd;
	struct nand_chip *nand;
	struct doc_priv *doc;

	for (mtd = doclist; mtd; mtd = nextmtd) {
		nand = mtd->priv;
		doc = nand->priv;
		nextmtd = doc->nextdoc;
		nand_release(mtd);
		iounmap(doc->virtadr);
		kfree(mtd);
	}
}

/* Module init: build the shared Reed-Solomon decoder, then probe either
 * the configured address or the standard probe-address table. */
static int __init init_nanddoc(void)
{
	int i, ret = 0;

	/* We could create the decoder on demand, if memory is a concern.
	 * This way we have it handy, if an error happens
	 *
	 * Symbolsize is 10 (bits)
	 * Primitive polynomial is x^10+x^3+1
	 * first consecutive root is 510
	 * primitive element to generate roots = 1
	 * generator polynomial degree = 4
	 */
	rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
	if (!rs_decoder) {
		printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
		return -ENOMEM;
	}

	if (doc_config_location) {
		printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
		ret = doc_probe(doc_config_location);
		if (ret < 0)
			goto outerr;
	} else {
		for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
			doc_probe(doc_locations[i]);
		}
	}
	/* No banner message any more. Print a message if no DiskOnChip
	   found, so the user knows we at least tried. */
	if (!doclist) {
		printk(KERN_INFO "No valid DiskOnChip devices found\n");
		ret = -ENODEV;
		goto outerr;
	}
	return 0;
 outerr:
	free_rs(rs_decoder);
	return ret;
}

static void __exit cleanup_nanddoc(void)
{
	/* Cleanup the nand/DoC resources */
	release_nanddoc();

	/* Free the reed solomon resources */
	if (rs_decoder) {
		free_rs(rs_decoder);
	}
}

module_init(init_nanddoc);
module_exit(cleanup_nanddoc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
gpl-2.0
TeamNDVRu/htc-kernel-endeavoru
sound/pci/asihpi/hpimsgx.c
149
22175
/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2010  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

    Extended Message Function With Response Cacheing

    (C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

/* PCI ID table generated from hpipcida.h; each entry's driver_data is the
 * adapter family's HPI message-handler function pointer. */
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

/* Per-adapter message handler, indexed by adapter_index. */
static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];

/* Match a probed PCI device against asihpi_pci_tbl (honoring PCI_ANY_ID
 * wildcards) and return the handler stored in driver_data, or NULL if the
 * device is unknown. */
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

/* Dispatch a message to the adapter's registered handler; answer with
 * HPI_ERROR_PROCESSING_MESSAGE if the index is out of range or no handler
 * is registered. */
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);
static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

/* Cached-response layouts must match the on-wire HPI structures exactly,
 * hence the pack(1). */
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

/* Handle HPI_OBJ_SUBSYSTEM messages locally (version query, open/close,
 * driver load/unload); most are answered without propagating down the
 * adapter chain. */
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		hpios_locked_mem_init();
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs dont implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpios_locked_mem_free_all();
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;
case HPI_SUBSYS_GET_NUM_ADAPTERS: case HPI_SUBSYS_GET_ADAPTER: HPI_COMMON(phm, phr); break; case HPI_SUBSYS_CREATE_ADAPTER: HPIMSGX__init(phm, phr); break; case HPI_SUBSYS_DELETE_ADAPTER: HPIMSGX__cleanup(phm->obj_index, h_owner); { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE); hm.adapter_index = phm->obj_index; hw_entry_point(&hm, &hr); } if ((phm->obj_index < HPI_MAX_ADAPTERS) && hpi_entry_points[phm->obj_index]) { hpi_entry_points[phm->obj_index] (phm, phr); hpi_entry_points[phm->obj_index] = NULL; } else phr->error = HPI_ERROR_INVALID_OBJ_INDEX; break; default: /* Must explicitly handle every subsys message in this switch */ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function, HPI_ERROR_INVALID_FUNC); break; } } static void adapter_message(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { switch (phm->function) { case HPI_ADAPTER_OPEN: adapter_open(phm, phr); break; case HPI_ADAPTER_CLOSE: adapter_close(phm, phr); break; default: hw_entry_point(phm, phr); break; } } static void mixer_message(struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_MIXER_OPEN: mixer_open(phm, phr); break; case HPI_MIXER_CLOSE: mixer_close(phm, phr); break; default: hw_entry_point(phm, phr); break; } } static void outstream_message(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) { hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function, HPI_ERROR_INVALID_OBJ_INDEX); return; } switch (phm->function) { case HPI_OSTREAM_OPEN: outstream_open(phm, phr, h_owner); break; case HPI_OSTREAM_CLOSE: outstream_close(phm, phr, h_owner); break; default: hw_entry_point(phm, phr); break; } } static void instream_message(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) { hpi_init_response(phr, 
HPI_OBJ_ISTREAM, phm->function, HPI_ERROR_INVALID_OBJ_INDEX); return; } switch (phm->function) { case HPI_ISTREAM_OPEN: instream_open(phm, phr, h_owner); break; case HPI_ISTREAM_CLOSE: instream_close(phm, phr, h_owner); break; default: hw_entry_point(phm, phr); break; } } /* NOTE: HPI_Message() must be defined in the driver as a wrapper for * HPI_MessageEx so that functions in hpifunc.c compile. */ void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { HPI_DEBUG_MESSAGE(DEBUG, phm); if (phm->type != HPI_TYPE_MESSAGE) { hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_INVALID_TYPE); return; } if (phm->adapter_index >= HPI_MAX_ADAPTERS && phm->adapter_index != HPIMSGX_ALLADAPTERS) { hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_BAD_ADAPTER_NUMBER); return; } switch (phm->object) { case HPI_OBJ_SUBSYSTEM: subsys_message(phm, phr, h_owner); break; case HPI_OBJ_ADAPTER: adapter_message(phm, phr, h_owner); break; case HPI_OBJ_MIXER: mixer_message(phm, phr); break; case HPI_OBJ_OSTREAM: outstream_message(phm, phr, h_owner); break; case HPI_OBJ_ISTREAM: instream_message(phm, phr, h_owner); break; default: hw_entry_point(phm, phr); break; } HPI_DEBUG_RESPONSE(phr); } static void adapter_open(struct hpi_message *phm, struct hpi_response *phr) { HPI_DEBUG_LOG(VERBOSE, "adapter_open\n"); memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index], sizeof(rESP_HPI_ADAPTER_OPEN[0])); } static void adapter_close(struct hpi_message *phm, struct hpi_response *phr) { HPI_DEBUG_LOG(VERBOSE, "adapter_close\n"); hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0); } static void mixer_open(struct hpi_message *phm, struct hpi_response *phr) { memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index], sizeof(rESP_HPI_MIXER_OPEN[0])); } static void mixer_close(struct hpi_message *phm, struct hpi_response *phr) { hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0); } static void instream_open(struct hpi_message *phm, struct 
hpi_response *phr, void *h_owner) { struct hpi_message hm; struct hpi_response hr; hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0); hpios_msgxlock_lock(&msgx_lock); if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag) phr->error = HPI_ERROR_OBJ_ALREADY_OPEN; else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index] [phm->obj_index].h.error) memcpy(phr, &rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm-> obj_index], sizeof(rESP_HPI_ISTREAM_OPEN[0][0])); else { instream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 1; hpios_msgxlock_unlock(&msgx_lock); /* issue a reset */ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET); hm.adapter_index = phm->adapter_index; hm.obj_index = phm->obj_index; hw_entry_point(&hm, &hr); hpios_msgxlock_lock(&msgx_lock); if (hr.error) { instream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 0; phr->error = hr.error; } else { instream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 1; instream_user_open[phm->adapter_index][phm-> obj_index].h_owner = h_owner; memcpy(phr, &rESP_HPI_ISTREAM_OPEN[phm->adapter_index] [phm->obj_index], sizeof(rESP_HPI_ISTREAM_OPEN[0][0])); } } hpios_msgxlock_unlock(&msgx_lock); } static void instream_close(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { struct hpi_message hm; struct hpi_response hr; hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0); hpios_msgxlock_lock(&msgx_lock); if (h_owner == instream_user_open[phm->adapter_index][phm-> obj_index].h_owner) { /* HPI_DEBUG_LOG(INFO,"closing adapter %d " "instream %d owned by %p\n", phm->wAdapterIndex, phm->wObjIndex, hOwner); */ instream_user_open[phm->adapter_index][phm-> obj_index].h_owner = NULL; hpios_msgxlock_unlock(&msgx_lock); /* issue a reset */ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET); hm.adapter_index = phm->adapter_index; hm.obj_index = phm->obj_index; hw_entry_point(&hm, &hr); hpios_msgxlock_lock(&msgx_lock); 
if (hr.error) { instream_user_open[phm->adapter_index][phm-> obj_index].h_owner = h_owner; phr->error = hr.error; } else { instream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 0; instream_user_open[phm->adapter_index][phm-> obj_index].h_owner = NULL; } } else { HPI_DEBUG_LOG(WARNING, "%p trying to close %d instream %d owned by %p\n", h_owner, phm->adapter_index, phm->obj_index, instream_user_open[phm->adapter_index][phm-> obj_index].h_owner); phr->error = HPI_ERROR_OBJ_NOT_OPEN; } hpios_msgxlock_unlock(&msgx_lock); } static void outstream_open(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { struct hpi_message hm; struct hpi_response hr; hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0); hpios_msgxlock_lock(&msgx_lock); if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag) phr->error = HPI_ERROR_OBJ_ALREADY_OPEN; else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index] [phm->obj_index].h.error) memcpy(phr, &rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm-> obj_index], sizeof(rESP_HPI_OSTREAM_OPEN[0][0])); else { outstream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 1; hpios_msgxlock_unlock(&msgx_lock); /* issue a reset */ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET); hm.adapter_index = phm->adapter_index; hm.obj_index = phm->obj_index; hw_entry_point(&hm, &hr); hpios_msgxlock_lock(&msgx_lock); if (hr.error) { outstream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 0; phr->error = hr.error; } else { outstream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 1; outstream_user_open[phm->adapter_index][phm-> obj_index].h_owner = h_owner; memcpy(phr, &rESP_HPI_OSTREAM_OPEN[phm->adapter_index] [phm->obj_index], sizeof(rESP_HPI_OSTREAM_OPEN[0][0])); } } hpios_msgxlock_unlock(&msgx_lock); } static void outstream_close(struct hpi_message *phm, struct hpi_response *phr, void *h_owner) { struct hpi_message hm; struct hpi_response hr; hpi_init_response(phr, 
HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0); hpios_msgxlock_lock(&msgx_lock); if (h_owner == outstream_user_open[phm->adapter_index][phm-> obj_index].h_owner) { /* HPI_DEBUG_LOG(INFO,"closing adapter %d " "outstream %d owned by %p\n", phm->wAdapterIndex, phm->wObjIndex, hOwner); */ outstream_user_open[phm->adapter_index][phm-> obj_index].h_owner = NULL; hpios_msgxlock_unlock(&msgx_lock); /* issue a reset */ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET); hm.adapter_index = phm->adapter_index; hm.obj_index = phm->obj_index; hw_entry_point(&hm, &hr); hpios_msgxlock_lock(&msgx_lock); if (hr.error) { outstream_user_open[phm->adapter_index][phm-> obj_index].h_owner = h_owner; phr->error = hr.error; } else { outstream_user_open[phm->adapter_index][phm-> obj_index].open_flag = 0; outstream_user_open[phm->adapter_index][phm-> obj_index].h_owner = NULL; } } else { HPI_DEBUG_LOG(WARNING, "%p trying to close %d outstream %d owned by %p\n", h_owner, phm->adapter_index, phm->obj_index, outstream_user_open[phm->adapter_index][phm-> obj_index].h_owner); phr->error = HPI_ERROR_OBJ_NOT_OPEN; } hpios_msgxlock_unlock(&msgx_lock); } static u16 adapter_prepare(u16 adapter) { struct hpi_message hm; struct hpi_response hr; /* Open the adapter and streams */ u16 i; /* call to HPI_ADAPTER_OPEN */ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_OPEN); hm.adapter_index = adapter; hw_entry_point(&hm, &hr); memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr, sizeof(rESP_HPI_ADAPTER_OPEN[0])); if (hr.error) return hr.error; /* call to HPI_ADAPTER_GET_INFO */ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_GET_INFO); hm.adapter_index = adapter; hw_entry_point(&hm, &hr); if (hr.error) return hr.error; aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams; aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams; aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type; /* call to HPI_OSTREAM_OPEN */ for (i = 0; i < 
aDAPTER_INFO[adapter].num_outstreams; i++) { hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN); hm.adapter_index = adapter; hm.obj_index = i; hw_entry_point(&hm, &hr); memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr, sizeof(rESP_HPI_OSTREAM_OPEN[0][0])); outstream_user_open[adapter][i].open_flag = 0; outstream_user_open[adapter][i].h_owner = NULL; } /* call to HPI_ISTREAM_OPEN */ for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) { hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN); hm.adapter_index = adapter; hm.obj_index = i; hw_entry_point(&hm, &hr); memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr, sizeof(rESP_HPI_ISTREAM_OPEN[0][0])); instream_user_open[adapter][i].open_flag = 0; instream_user_open[adapter][i].h_owner = NULL; } /* call to HPI_MIXER_OPEN */ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN); hm.adapter_index = adapter; hw_entry_point(&hm, &hr); memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr, sizeof(rESP_HPI_MIXER_OPEN[0])); return 0; } static void HPIMSGX__reset(u16 adapter_index) { int i; u16 adapter; struct hpi_response hr; if (adapter_index == HPIMSGX_ALLADAPTERS) { for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) { hpi_init_response(&hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER); memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr, sizeof(rESP_HPI_ADAPTER_OPEN[adapter])); hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN, HPI_ERROR_INVALID_OBJ); memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr, sizeof(rESP_HPI_MIXER_OPEN[adapter])); for (i = 0; i < HPI_MAX_STREAMS; i++) { hpi_init_response(&hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, HPI_ERROR_INVALID_OBJ); memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr, sizeof(rESP_HPI_OSTREAM_OPEN[adapter] [i])); hpi_init_response(&hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, HPI_ERROR_INVALID_OBJ); memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr, sizeof(rESP_HPI_ISTREAM_OPEN[adapter] [i])); } } } else if (adapter_index < HPI_MAX_ADAPTERS) { 
rESP_HPI_ADAPTER_OPEN[adapter_index].h.error = HPI_ERROR_BAD_ADAPTER; rESP_HPI_MIXER_OPEN[adapter_index].h.error = HPI_ERROR_INVALID_OBJ; for (i = 0; i < HPI_MAX_STREAMS; i++) { rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error = HPI_ERROR_INVALID_OBJ; rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error = HPI_ERROR_INVALID_OBJ; } } } static u16 HPIMSGX__init(struct hpi_message *phm, /* HPI_SUBSYS_CREATE_ADAPTER structure with */ /* resource list or NULL=find all */ struct hpi_response *phr /* response from HPI_ADAPTER_GET_INFO */ ) { hpi_handler_func *entry_point_func; struct hpi_response hr; /* Init response here so we can pass in previous adapter list */ hpi_init_response(&hr, phm->object, phm->function, HPI_ERROR_INVALID_OBJ); entry_point_func = hpi_lookup_entry_point_function(phm->u.s.resource.r.pci); if (entry_point_func) { HPI_DEBUG_MESSAGE(DEBUG, phm); entry_point_func(phm, &hr); } else { phr->error = HPI_ERROR_PROCESSING_MESSAGE; return phr->error; } if (hr.error == 0) { /* the adapter was created successfully save the mapping for future use */ hpi_entry_points[hr.u.s.adapter_index] = entry_point_func; /* prepare adapter (pre-open streams etc.) 
*/ HPI_DEBUG_LOG(DEBUG, "HPI_SUBSYS_CREATE_ADAPTER successful," " preparing adapter\n"); adapter_prepare(hr.u.s.adapter_index); } memcpy(phr, &hr, hr.size); return phr->error; } static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner) { int i, adapter, adapter_limit; if (!h_owner) return; if (adapter_index == HPIMSGX_ALLADAPTERS) { adapter = 0; adapter_limit = HPI_MAX_ADAPTERS; } else { adapter = adapter_index; adapter_limit = adapter + 1; } for (; adapter < adapter_limit; adapter++) { /* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */ for (i = 0; i < HPI_MAX_STREAMS; i++) { if (h_owner == outstream_user_open[adapter][i].h_owner) { struct hpi_message hm; struct hpi_response hr; HPI_DEBUG_LOG(DEBUG, "Close adapter %d ostream %d\n", adapter, i); hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET); hm.adapter_index = (u16)adapter; hm.obj_index = (u16)i; hw_entry_point(&hm, &hr); hm.function = HPI_OSTREAM_HOSTBUFFER_FREE; hw_entry_point(&hm, &hr); hm.function = HPI_OSTREAM_GROUP_RESET; hw_entry_point(&hm, &hr); outstream_user_open[adapter][i].open_flag = 0; outstream_user_open[adapter][i].h_owner = NULL; } if (h_owner == instream_user_open[adapter][i].h_owner) { struct hpi_message hm; struct hpi_response hr; HPI_DEBUG_LOG(DEBUG, "Close adapter %d istream %d\n", adapter, i); hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET); hm.adapter_index = (u16)adapter; hm.obj_index = (u16)i; hw_entry_point(&hm, &hr); hm.function = HPI_ISTREAM_HOSTBUFFER_FREE; hw_entry_point(&hm, &hr); hm.function = HPI_ISTREAM_GROUP_RESET; hw_entry_point(&hm, &hr); instream_user_open[adapter][i].open_flag = 0; instream_user_open[adapter][i].h_owner = NULL; } } } }
gpl-2.0
escalator2015/linux
drivers/usb/atm/xusbatm.c
917
7763
/****************************************************************************** * xusbatm.c - dumb usbatm-based driver for modems initialized in userspace * * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ******************************************************************************/ #include <linux/module.h> #include <linux/etherdevice.h> /* for eth_random_addr() */ #include "usbatm.h" #define XUSBATM_DRIVERS_MAX 8 #define XUSBATM_PARM(name, type, parmtype, desc) \ static type name[XUSBATM_DRIVERS_MAX]; \ static unsigned int num_##name; \ module_param_array(name, parmtype, &num_##name, 0444); \ MODULE_PARM_DESC(name, desc) XUSBATM_PARM(vendor, unsigned short, ushort, "USB device vendor"); XUSBATM_PARM(product, unsigned short, ushort, "USB device product"); XUSBATM_PARM(rx_endpoint, unsigned char, byte, "rx endpoint number"); XUSBATM_PARM(tx_endpoint, unsigned char, byte, "tx endpoint number"); XUSBATM_PARM(rx_padding, unsigned char, byte, "rx padding (default 0)"); XUSBATM_PARM(tx_padding, unsigned char, byte, "tx padding (default 0)"); XUSBATM_PARM(rx_altsetting, unsigned char, byte, "rx altsetting (default 0)"); XUSBATM_PARM(tx_altsetting, unsigned char, byte, "rx altsetting (default 0)"); static const char xusbatm_driver_name[] = "xusbatm"; static 
struct usbatm_driver xusbatm_drivers[XUSBATM_DRIVERS_MAX]; static struct usb_device_id xusbatm_usb_ids[XUSBATM_DRIVERS_MAX + 1]; static struct usb_driver xusbatm_usb_driver; static struct usb_interface *xusbatm_find_intf(struct usb_device *usb_dev, int altsetting, u8 ep) { struct usb_host_interface *alt; struct usb_interface *intf; int i, j; for (i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++) if ((intf = usb_dev->actconfig->interface[i]) && (alt = usb_altnum_to_altsetting(intf, altsetting))) for (j = 0; j < alt->desc.bNumEndpoints; j++) if (alt->endpoint[j].desc.bEndpointAddress == ep) return intf; return NULL; } static int xusbatm_capture_intf(struct usbatm_data *usbatm, struct usb_device *usb_dev, struct usb_interface *intf, int altsetting, int claim) { int ifnum = intf->altsetting->desc.bInterfaceNumber; int ret; if (claim && (ret = usb_driver_claim_interface(&xusbatm_usb_driver, intf, usbatm))) { usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, ifnum, ret); return ret; } ret = usb_set_interface(usb_dev, ifnum, altsetting); if (ret) { usb_err(usbatm, "%s: altsetting %2d for interface %2d failed (%d)!\n", __func__, altsetting, ifnum, ret); return ret; } return 0; } static void xusbatm_release_intf(struct usb_device *usb_dev, struct usb_interface *intf, int claimed) { if (claimed) { usb_set_intfdata(intf, NULL); usb_driver_release_interface(&xusbatm_usb_driver, intf); } } static int xusbatm_bind(struct usbatm_data *usbatm, struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); int drv_ix = id - xusbatm_usb_ids; int rx_alt = rx_altsetting[drv_ix]; int tx_alt = tx_altsetting[drv_ix]; struct usb_interface *rx_intf = xusbatm_find_intf(usb_dev, rx_alt, rx_endpoint[drv_ix]); struct usb_interface *tx_intf = xusbatm_find_intf(usb_dev, tx_alt, tx_endpoint[drv_ix]); int ret; usb_dbg(usbatm, "%s: binding driver %d: vendor %04x product %04x" " rx: ep %02x padd %d alt %2d tx: ep %02x 
padd %d alt %2d\n", __func__, drv_ix, vendor[drv_ix], product[drv_ix], rx_endpoint[drv_ix], rx_padding[drv_ix], rx_alt, tx_endpoint[drv_ix], tx_padding[drv_ix], tx_alt); if (!rx_intf || !tx_intf) { if (!rx_intf) usb_dbg(usbatm, "%s: no interface contains endpoint %02x in altsetting %2d\n", __func__, rx_endpoint[drv_ix], rx_alt); if (!tx_intf) usb_dbg(usbatm, "%s: no interface contains endpoint %02x in altsetting %2d\n", __func__, tx_endpoint[drv_ix], tx_alt); return -ENODEV; } if ((rx_intf != intf) && (tx_intf != intf)) return -ENODEV; if ((rx_intf == tx_intf) && (rx_alt != tx_alt)) { usb_err(usbatm, "%s: altsettings clash on interface %2d (%2d vs %2d)!\n", __func__, rx_intf->altsetting->desc.bInterfaceNumber, rx_alt, tx_alt); return -EINVAL; } usb_dbg(usbatm, "%s: rx If#=%2d; tx If#=%2d\n", __func__, rx_intf->altsetting->desc.bInterfaceNumber, tx_intf->altsetting->desc.bInterfaceNumber); ret = xusbatm_capture_intf(usbatm, usb_dev, rx_intf, rx_alt, rx_intf != intf); if (ret) return ret; if ((tx_intf != rx_intf) && (ret = xusbatm_capture_intf(usbatm, usb_dev, tx_intf, tx_alt, tx_intf != intf))) { xusbatm_release_intf(usb_dev, rx_intf, rx_intf != intf); return ret; } return 0; } static void xusbatm_unbind(struct usbatm_data *usbatm, struct usb_interface *intf) { struct usb_device *usb_dev = interface_to_usbdev(intf); int i; usb_dbg(usbatm, "%s entered\n", __func__); for (i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++) { struct usb_interface *cur_intf = usb_dev->actconfig->interface[i]; if (cur_intf && (usb_get_intfdata(cur_intf) == usbatm)) { usb_set_intfdata(cur_intf, NULL); usb_driver_release_interface(&xusbatm_usb_driver, cur_intf); } } } static int xusbatm_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { atm_dbg(usbatm, "%s entered\n", __func__); /* use random MAC as we've no way to get it from the device */ eth_random_addr(atm_dev->esi); return 0; } static int xusbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 
{ return usbatm_usb_probe(intf, id, xusbatm_drivers + (id - xusbatm_usb_ids)); } static struct usb_driver xusbatm_usb_driver = { .name = xusbatm_driver_name, .probe = xusbatm_usb_probe, .disconnect = usbatm_usb_disconnect, .id_table = xusbatm_usb_ids }; static int __init xusbatm_init(void) { int i; if (!num_vendor || num_vendor != num_product || num_vendor != num_rx_endpoint || num_vendor != num_tx_endpoint) { printk(KERN_WARNING "xusbatm: malformed module parameters\n"); return -EINVAL; } for (i = 0; i < num_vendor; i++) { rx_endpoint[i] |= USB_DIR_IN; tx_endpoint[i] &= USB_ENDPOINT_NUMBER_MASK; xusbatm_usb_ids[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE; xusbatm_usb_ids[i].idVendor = vendor[i]; xusbatm_usb_ids[i].idProduct = product[i]; xusbatm_drivers[i].driver_name = xusbatm_driver_name; xusbatm_drivers[i].bind = xusbatm_bind; xusbatm_drivers[i].unbind = xusbatm_unbind; xusbatm_drivers[i].atm_start = xusbatm_atm_start; xusbatm_drivers[i].bulk_in = rx_endpoint[i]; xusbatm_drivers[i].bulk_out = tx_endpoint[i]; xusbatm_drivers[i].rx_padding = rx_padding[i]; xusbatm_drivers[i].tx_padding = tx_padding[i]; } return usb_register(&xusbatm_usb_driver); } module_init(xusbatm_init); static void __exit xusbatm_exit(void) { usb_deregister(&xusbatm_usb_driver); } module_exit(xusbatm_exit); MODULE_AUTHOR("Roman Kagan, Duncan Sands"); MODULE_DESCRIPTION("Driver for USB ADSL modems initialized in userspace"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1");
gpl-2.0
shambakey1/kernel_sh
arch/arm/mach-at91/at91rm9200_devices.c
1429
29744
/* * arch/arm/mach-at91/at91rm9200_devices.c * * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org> * Copyright (C) 2005 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/i2c-gpio.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91rm9200.h> #include <mach/at91rm9200_mc.h> #include "generic.h" /* -------------------------------------------------------------------- * USB Host * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static u64 ohci_dmamask = DMA_BIT_MASK(32); static struct at91_usbh_data usbh_data; static struct resource usbh_resources[] = { [0] = { .start = AT91RM9200_UHP_BASE, .end = AT91RM9200_UHP_BASE + SZ_1M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_UHP, .end = AT91RM9200_ID_UHP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_usbh_device = { .name = "at91_ohci", .id = -1, .dev = { .dma_mask = &ohci_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &usbh_data, }, .resource = usbh_resources, .num_resources = ARRAY_SIZE(usbh_resources), }; void __init at91_add_device_usbh(struct at91_usbh_data *data) { if (!data) return; usbh_data = *data; platform_device_register(&at91rm9200_usbh_device); } #else void __init at91_add_device_usbh(struct at91_usbh_data *data) {} #endif /* -------------------------------------------------------------------- * USB Device (Gadget) * -------------------------------------------------------------------- */ #ifdef CONFIG_USB_GADGET_AT91 static struct at91_udc_data udc_data; static struct resource 
udc_resources[] = { [0] = { .start = AT91RM9200_BASE_UDP, .end = AT91RM9200_BASE_UDP + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_UDP, .end = AT91RM9200_ID_UDP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_udc_device = { .name = "at91_udc", .id = -1, .dev = { .platform_data = &udc_data, }, .resource = udc_resources, .num_resources = ARRAY_SIZE(udc_resources), }; void __init at91_add_device_udc(struct at91_udc_data *data) { if (!data) return; if (data->vbus_pin) { at91_set_gpio_input(data->vbus_pin, 0); at91_set_deglitch(data->vbus_pin, 1); } if (data->pullup_pin) at91_set_gpio_output(data->pullup_pin, 0); udc_data = *data; platform_device_register(&at91rm9200_udc_device); } #else void __init at91_add_device_udc(struct at91_udc_data *data) {} #endif /* -------------------------------------------------------------------- * Ethernet * -------------------------------------------------------------------- */ #if defined(CONFIG_ARM_AT91_ETHER) || defined(CONFIG_ARM_AT91_ETHER_MODULE) static u64 eth_dmamask = DMA_BIT_MASK(32); static struct at91_eth_data eth_data; static struct resource eth_resources[] = { [0] = { .start = AT91_VA_BASE_EMAC, .end = AT91_VA_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_EMAC, .end = AT91RM9200_ID_EMAC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_eth_device = { .name = "at91_ether", .id = -1, .dev = { .dma_mask = &eth_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &eth_data, }, .resource = eth_resources, .num_resources = ARRAY_SIZE(eth_resources), }; void __init at91_add_device_eth(struct at91_eth_data *data) { if (!data) return; if (data->phy_irq_pin) { at91_set_gpio_input(data->phy_irq_pin, 0); at91_set_deglitch(data->phy_irq_pin, 1); } /* Pins used for MII and RMII */ at91_set_A_periph(AT91_PIN_PA16, 0); /* EMDIO */ at91_set_A_periph(AT91_PIN_PA15, 0); /* EMDC */ at91_set_A_periph(AT91_PIN_PA14, 0); /* 
ERXER */ at91_set_A_periph(AT91_PIN_PA13, 0); /* ERX1 */ at91_set_A_periph(AT91_PIN_PA12, 0); /* ERX0 */ at91_set_A_periph(AT91_PIN_PA11, 0); /* ECRS_ECRSDV */ at91_set_A_periph(AT91_PIN_PA10, 0); /* ETX1 */ at91_set_A_periph(AT91_PIN_PA9, 0); /* ETX0 */ at91_set_A_periph(AT91_PIN_PA8, 0); /* ETXEN */ at91_set_A_periph(AT91_PIN_PA7, 0); /* ETXCK_EREFCK */ if (!data->is_rmii) { at91_set_B_periph(AT91_PIN_PB19, 0); /* ERXCK */ at91_set_B_periph(AT91_PIN_PB18, 0); /* ECOL */ at91_set_B_periph(AT91_PIN_PB17, 0); /* ERXDV */ at91_set_B_periph(AT91_PIN_PB16, 0); /* ERX3 */ at91_set_B_periph(AT91_PIN_PB15, 0); /* ERX2 */ at91_set_B_periph(AT91_PIN_PB14, 0); /* ETXER */ at91_set_B_periph(AT91_PIN_PB13, 0); /* ETX3 */ at91_set_B_periph(AT91_PIN_PB12, 0); /* ETX2 */ } eth_data = *data; platform_device_register(&at91rm9200_eth_device); } #else void __init at91_add_device_eth(struct at91_eth_data *data) {} #endif /* -------------------------------------------------------------------- * Compact Flash / PCMCIA * -------------------------------------------------------------------- */ #if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) static struct at91_cf_data cf_data; #define CF_BASE AT91_CHIPSELECT_4 static struct resource cf_resources[] = { [0] = { .start = CF_BASE, /* ties up CS4, CS5 and CS6 */ .end = CF_BASE + (0x30000000 - 1), .flags = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT, }, }; static struct platform_device at91rm9200_cf_device = { .name = "at91_cf", .id = -1, .dev = { .platform_data = &cf_data, }, .resource = cf_resources, .num_resources = ARRAY_SIZE(cf_resources), }; void __init at91_add_device_cf(struct at91_cf_data *data) { unsigned int csa; if (!data) return; data->chipselect = 4; /* can only use EBI ChipSelect 4 */ /* CF takes over CS4, CS5, CS6 */ csa = at91_sys_read(AT91_EBI_CSA); at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH); /* * Static memory controller timing adjustments. 
* REVISIT: these timings are in terms of MCK cycles, so * when MCK changes (cpufreq etc) so must these values... */ at91_sys_write(AT91_SMC_CSR(4), AT91_SMC_ACSS_STD | AT91_SMC_DBW_16 | AT91_SMC_BAT | AT91_SMC_WSEN | AT91_SMC_NWS_(32) /* wait states */ | AT91_SMC_RWSETUP_(6) /* setup time */ | AT91_SMC_RWHOLD_(4) /* hold time */ ); /* input/irq */ if (data->irq_pin) { at91_set_gpio_input(data->irq_pin, 1); at91_set_deglitch(data->irq_pin, 1); } at91_set_gpio_input(data->det_pin, 1); at91_set_deglitch(data->det_pin, 1); /* outputs, initially off */ if (data->vcc_pin) at91_set_gpio_output(data->vcc_pin, 0); at91_set_gpio_output(data->rst_pin, 0); /* force poweron defaults for these pins ... */ at91_set_A_periph(AT91_PIN_PC9, 0); /* A25/CFRNW */ at91_set_A_periph(AT91_PIN_PC10, 0); /* NCS4/CFCS */ at91_set_A_periph(AT91_PIN_PC11, 0); /* NCS5/CFCE1 */ at91_set_A_periph(AT91_PIN_PC12, 0); /* NCS6/CFCE2 */ /* nWAIT is _not_ a default setting */ at91_set_A_periph(AT91_PIN_PC6, 1); /* nWAIT */ cf_data = *data; platform_device_register(&at91rm9200_cf_device); } #else void __init at91_add_device_cf(struct at91_cf_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD * -------------------------------------------------------------------- */ #if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct at91_mmc_data mmc_data; static struct resource mmc_resources[] = { [0] = { .start = AT91RM9200_BASE_MCI, .end = AT91RM9200_BASE_MCI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_MCI, .end = AT91RM9200_ID_MCI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_mmc_device = { .name = "at91_mci", .id = -1, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &mmc_data, }, .resource = mmc_resources, .num_resources = ARRAY_SIZE(mmc_resources), }; void __init at91_add_device_mmc(short mmc_id, 
struct at91_mmc_data *data) { if (!data) return; /* input/irq */ if (data->det_pin) { at91_set_gpio_input(data->det_pin, 1); at91_set_deglitch(data->det_pin, 1); } if (data->wp_pin) at91_set_gpio_input(data->wp_pin, 1); if (data->vcc_pin) at91_set_gpio_output(data->vcc_pin, 0); /* CLK */ at91_set_A_periph(AT91_PIN_PA27, 0); if (data->slot_b) { /* CMD */ at91_set_B_periph(AT91_PIN_PA8, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_B_periph(AT91_PIN_PA9, 1); if (data->wire4) { at91_set_B_periph(AT91_PIN_PA10, 1); at91_set_B_periph(AT91_PIN_PA11, 1); at91_set_B_periph(AT91_PIN_PA12, 1); } } else { /* CMD */ at91_set_A_periph(AT91_PIN_PA28, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_A_periph(AT91_PIN_PA29, 1); if (data->wire4) { at91_set_B_periph(AT91_PIN_PB3, 1); at91_set_B_periph(AT91_PIN_PB4, 1); at91_set_B_periph(AT91_PIN_PB5, 1); } } mmc_data = *data; platform_device_register(&at91rm9200_mmc_device); } #else void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {} #endif /* -------------------------------------------------------------------- * NAND / SmartMedia * -------------------------------------------------------------------- */ #if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE) static struct atmel_nand_data nand_data; #define NAND_BASE AT91_CHIPSELECT_3 static struct resource nand_resources[] = { { .start = NAND_BASE, .end = NAND_BASE + SZ_256M - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91rm9200_nand_device = { .name = "atmel_nand", .id = -1, .dev = { .platform_data = &nand_data, }, .resource = nand_resources, .num_resources = ARRAY_SIZE(nand_resources), }; void __init at91_add_device_nand(struct atmel_nand_data *data) { unsigned int csa; if (!data) return; /* enable the address range of CS3 */ csa = at91_sys_read(AT91_EBI_CSA); at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS3A_SMC_SMARTMEDIA); /* set the bus interface characteristics */ at91_sys_write(AT91_SMC_CSR(3), AT91_SMC_ACSS_STD | 
AT91_SMC_DBW_8 | AT91_SMC_WSEN | AT91_SMC_NWS_(5) | AT91_SMC_TDF_(1) | AT91_SMC_RWSETUP_(0) /* tDS Data Set up Time 30 - ns */ | AT91_SMC_RWHOLD_(1) /* tDH Data Hold Time 20 - ns */ ); /* enable pin */ if (data->enable_pin) at91_set_gpio_output(data->enable_pin, 1); /* ready/busy pin */ if (data->rdy_pin) at91_set_gpio_input(data->rdy_pin, 1); /* card detect pin */ if (data->det_pin) at91_set_gpio_input(data->det_pin, 1); at91_set_A_periph(AT91_PIN_PC1, 0); /* SMOE */ at91_set_A_periph(AT91_PIN_PC3, 0); /* SMWE */ nand_data = *data; platform_device_register(&at91rm9200_nand_device); } #else void __init at91_add_device_nand(struct atmel_nand_data *data) {} #endif /* -------------------------------------------------------------------- * TWI (i2c) * -------------------------------------------------------------------- */ /* * Prefer the GPIO code since the TWI controller isn't robust * (gets overruns and underruns under load) and can only issue * repeated STARTs in one scenario (the driver doesn't yet handle them). 
*/ #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) static struct i2c_gpio_platform_data pdata = { .sda_pin = AT91_PIN_PA25, .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PA26, .scl_is_open_drain = 1, .udelay = 2, /* ~100 kHz */ }; static struct platform_device at91rm9200_twi_device = { .name = "i2c-gpio", .id = -1, .dev.platform_data = &pdata, }; void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) { at91_set_GPIO_periph(AT91_PIN_PA25, 1); /* TWD (SDA) */ at91_set_multi_drive(AT91_PIN_PA25, 1); at91_set_GPIO_periph(AT91_PIN_PA26, 1); /* TWCK (SCL) */ at91_set_multi_drive(AT91_PIN_PA26, 1); i2c_register_board_info(0, devices, nr_devices); platform_device_register(&at91rm9200_twi_device); } #elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE) static struct resource twi_resources[] = { [0] = { .start = AT91RM9200_BASE_TWI, .end = AT91RM9200_BASE_TWI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_TWI, .end = AT91RM9200_ID_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_twi_device = { .name = "at91_i2c", .id = -1, .resource = twi_resources, .num_resources = ARRAY_SIZE(twi_resources), }; void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) { /* pins used for TWI interface */ at91_set_A_periph(AT91_PIN_PA25, 0); /* TWD */ at91_set_multi_drive(AT91_PIN_PA25, 1); at91_set_A_periph(AT91_PIN_PA26, 0); /* TWCK */ at91_set_multi_drive(AT91_PIN_PA26, 1); i2c_register_board_info(0, devices, nr_devices); platform_device_register(&at91rm9200_twi_device); } #else void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * SPI * -------------------------------------------------------------------- */ #if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) static u64 spi_dmamask = DMA_BIT_MASK(32); static struct resource spi_resources[] = { 
[0] = { .start = AT91RM9200_BASE_SPI, .end = AT91RM9200_BASE_SPI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_SPI, .end = AT91RM9200_ID_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_spi_device = { .name = "atmel_spi", .id = 0, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi_resources, .num_resources = ARRAY_SIZE(spi_resources), }; static const unsigned spi_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PA5, AT91_PIN_PA6 }; void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) { int i; unsigned long cs_pin; at91_set_A_periph(AT91_PIN_PA0, 0); /* MISO */ at91_set_A_periph(AT91_PIN_PA1, 0); /* MOSI */ at91_set_A_periph(AT91_PIN_PA2, 0); /* SPCK */ /* Enable SPI chip-selects */ for (i = 0; i < nr_devices; i++) { if (devices[i].controller_data) cs_pin = (unsigned long) devices[i].controller_data; else cs_pin = spi_standard_cs[devices[i].chip_select]; if (devices[i].chip_select == 0) /* for CS0 errata */ at91_set_A_periph(cs_pin, 0); else at91_set_gpio_output(cs_pin, 1); /* pass chip-select pin to driver */ devices[i].controller_data = (void *) cs_pin; } spi_register_board_info(devices, nr_devices); platform_device_register(&at91rm9200_spi_device); } #else void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * Timer/Counter blocks * -------------------------------------------------------------------- */ #ifdef CONFIG_ATMEL_TCLIB static struct resource tcb0_resources[] = { [0] = { .start = AT91RM9200_BASE_TCB0, .end = AT91RM9200_BASE_TCB0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_TC0, .end = AT91RM9200_ID_TC0, .flags = IORESOURCE_IRQ, }, [2] = { .start = AT91RM9200_ID_TC1, .end = AT91RM9200_ID_TC1, .flags = IORESOURCE_IRQ, }, [3] = { .start = AT91RM9200_ID_TC2, .end = AT91RM9200_ID_TC2, .flags = 
IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_tcb0_device = { .name = "atmel_tcb", .id = 0, .resource = tcb0_resources, .num_resources = ARRAY_SIZE(tcb0_resources), }; static struct resource tcb1_resources[] = { [0] = { .start = AT91RM9200_BASE_TCB1, .end = AT91RM9200_BASE_TCB1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_TC3, .end = AT91RM9200_ID_TC3, .flags = IORESOURCE_IRQ, }, [2] = { .start = AT91RM9200_ID_TC4, .end = AT91RM9200_ID_TC4, .flags = IORESOURCE_IRQ, }, [3] = { .start = AT91RM9200_ID_TC5, .end = AT91RM9200_ID_TC5, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_tcb1_device = { .name = "atmel_tcb", .id = 1, .resource = tcb1_resources, .num_resources = ARRAY_SIZE(tcb1_resources), }; static void __init at91_add_device_tc(void) { platform_device_register(&at91rm9200_tcb0_device); platform_device_register(&at91rm9200_tcb1_device); } #else static void __init at91_add_device_tc(void) { } #endif /* -------------------------------------------------------------------- * RTC * -------------------------------------------------------------------- */ #if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE) static struct platform_device at91rm9200_rtc_device = { .name = "at91_rtc", .id = -1, .num_resources = 0, }; static void __init at91_add_device_rtc(void) { platform_device_register(&at91rm9200_rtc_device); } #else static void __init at91_add_device_rtc(void) {} #endif /* -------------------------------------------------------------------- * Watchdog * -------------------------------------------------------------------- */ #if defined(CONFIG_AT91RM9200_WATCHDOG) || defined(CONFIG_AT91RM9200_WATCHDOG_MODULE) static struct platform_device at91rm9200_wdt_device = { .name = "at91_wdt", .id = -1, .num_resources = 0, }; static void __init at91_add_device_watchdog(void) { platform_device_register(&at91rm9200_wdt_device); } #else static void __init 
at91_add_device_watchdog(void) {} #endif /* -------------------------------------------------------------------- * SSC -- Synchronous Serial Controller * -------------------------------------------------------------------- */ #if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE) static u64 ssc0_dmamask = DMA_BIT_MASK(32); static struct resource ssc0_resources[] = { [0] = { .start = AT91RM9200_BASE_SSC0, .end = AT91RM9200_BASE_SSC0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_SSC0, .end = AT91RM9200_ID_SSC0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_ssc0_device = { .name = "ssc", .id = 0, .dev = { .dma_mask = &ssc0_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc0_resources, .num_resources = ARRAY_SIZE(ssc0_resources), }; static inline void configure_ssc0_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PB0, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PB1, 1); if (pins & ATMEL_SSC_TD) at91_set_A_periph(AT91_PIN_PB2, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PB3, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PB4, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PB5, 1); } static u64 ssc1_dmamask = DMA_BIT_MASK(32); static struct resource ssc1_resources[] = { [0] = { .start = AT91RM9200_BASE_SSC1, .end = AT91RM9200_BASE_SSC1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_SSC1, .end = AT91RM9200_ID_SSC1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_ssc1_device = { .name = "ssc", .id = 1, .dev = { .dma_mask = &ssc1_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc1_resources, .num_resources = ARRAY_SIZE(ssc1_resources), }; static inline void configure_ssc1_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PB6, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PB7, 1); if (pins & ATMEL_SSC_TD) 
at91_set_A_periph(AT91_PIN_PB8, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PB9, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PB10, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PB11, 1); } static u64 ssc2_dmamask = DMA_BIT_MASK(32); static struct resource ssc2_resources[] = { [0] = { .start = AT91RM9200_BASE_SSC2, .end = AT91RM9200_BASE_SSC2 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_SSC2, .end = AT91RM9200_ID_SSC2, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91rm9200_ssc2_device = { .name = "ssc", .id = 2, .dev = { .dma_mask = &ssc2_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc2_resources, .num_resources = ARRAY_SIZE(ssc2_resources), }; static inline void configure_ssc2_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PB12, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PB13, 1); if (pins & ATMEL_SSC_TD) at91_set_A_periph(AT91_PIN_PB14, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PB15, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PB16, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PB17, 1); } /* * SSC controllers are accessed through library code, instead of any * kind of all-singing/all-dancing driver. For example one could be * used by a particular I2S audio codec's driver, while another one * on the same system might be used by a custom data capture driver. */ void __init at91_add_device_ssc(unsigned id, unsigned pins) { struct platform_device *pdev; /* * NOTE: caller is responsible for passing information matching * "pins" to whatever will be using each particular controller. 
*/ switch (id) { case AT91RM9200_ID_SSC0: pdev = &at91rm9200_ssc0_device; configure_ssc0_pins(pins); break; case AT91RM9200_ID_SSC1: pdev = &at91rm9200_ssc1_device; configure_ssc1_pins(pins); break; case AT91RM9200_ID_SSC2: pdev = &at91rm9200_ssc2_device; configure_ssc2_pins(pins); break; default: return; } platform_device_register(pdev); } #else void __init at91_add_device_ssc(unsigned id, unsigned pins) {} #endif /* -------------------------------------------------------------------- * UART * -------------------------------------------------------------------- */ #if defined(CONFIG_SERIAL_ATMEL) static struct resource dbgu_resources[] = { [0] = { .start = AT91_VA_BASE_SYS + AT91_DBGU, .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91_ID_SYS, .end = AT91_ID_SYS, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); static struct platform_device at91rm9200_dbgu_device = { .name = "atmel_usart", .id = 0, .dev = { .dma_mask = &dbgu_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &dbgu_data, }, .resource = dbgu_resources, .num_resources = ARRAY_SIZE(dbgu_resources), }; static inline void configure_dbgu_pins(void) { at91_set_A_periph(AT91_PIN_PA30, 0); /* DRXD */ at91_set_A_periph(AT91_PIN_PA31, 1); /* DTXD */ } static struct resource uart0_resources[] = { [0] = { .start = AT91RM9200_BASE_US0, .end = AT91RM9200_BASE_US0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_US0, .end = AT91RM9200_ID_US0, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); static struct platform_device at91rm9200_uart0_device = { .name = "atmel_usart", .id = 1, .dev = { .dma_mask = &uart0_dmamask, 
.coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart0_data, }, .resource = uart0_resources, .num_resources = ARRAY_SIZE(uart0_resources), }; static inline void configure_usart0_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PA17, 1); /* TXD0 */ at91_set_A_periph(AT91_PIN_PA18, 0); /* RXD0 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PA20, 0); /* CTS0 */ if (pins & ATMEL_UART_RTS) { /* * AT91RM9200 Errata #39 - RTS0 is not internally connected to PA21. * We need to drive the pin manually. Default is off (RTS is active low). */ at91_set_gpio_output(AT91_PIN_PA21, 1); } } static struct resource uart1_resources[] = { [0] = { .start = AT91RM9200_BASE_US1, .end = AT91RM9200_BASE_US1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_US1, .end = AT91RM9200_ID_US1, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); static struct platform_device at91rm9200_uart1_device = { .name = "atmel_usart", .id = 2, .dev = { .dma_mask = &uart1_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart1_data, }, .resource = uart1_resources, .num_resources = ARRAY_SIZE(uart1_resources), }; static inline void configure_usart1_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB20, 1); /* TXD1 */ at91_set_A_periph(AT91_PIN_PB21, 0); /* RXD1 */ if (pins & ATMEL_UART_RI) at91_set_A_periph(AT91_PIN_PB18, 0); /* RI1 */ if (pins & ATMEL_UART_DTR) at91_set_A_periph(AT91_PIN_PB19, 0); /* DTR1 */ if (pins & ATMEL_UART_DCD) at91_set_A_periph(AT91_PIN_PB23, 0); /* DCD1 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PB24, 0); /* CTS1 */ if (pins & ATMEL_UART_DSR) at91_set_A_periph(AT91_PIN_PB25, 0); /* DSR1 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PB26, 0); /* RTS1 */ } static struct resource uart2_resources[] = { [0] = { .start = AT91RM9200_BASE_US2, .end = AT91RM9200_BASE_US2 + SZ_16K - 1, .flags = 
IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_US2, .end = AT91RM9200_ID_US2, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); static struct platform_device at91rm9200_uart2_device = { .name = "atmel_usart", .id = 3, .dev = { .dma_mask = &uart2_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart2_data, }, .resource = uart2_resources, .num_resources = ARRAY_SIZE(uart2_resources), }; static inline void configure_usart2_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PA22, 0); /* RXD2 */ at91_set_A_periph(AT91_PIN_PA23, 1); /* TXD2 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PA30, 0); /* CTS2 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PA31, 0); /* RTS2 */ } static struct resource uart3_resources[] = { [0] = { .start = AT91RM9200_BASE_US3, .end = AT91RM9200_BASE_US3 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91RM9200_ID_US3, .end = AT91RM9200_ID_US3, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); static struct platform_device at91rm9200_uart3_device = { .name = "atmel_usart", .id = 4, .dev = { .dma_mask = &uart3_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart3_data, }, .resource = uart3_resources, .num_resources = ARRAY_SIZE(uart3_resources), }; static inline void configure_usart3_pins(unsigned pins) { at91_set_B_periph(AT91_PIN_PA5, 1); /* TXD3 */ at91_set_B_periph(AT91_PIN_PA6, 0); /* RXD3 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PB1, 0); /* CTS3 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PB0, 0); /* RTS3 */ } static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */ struct platform_device *atmel_default_console_device; /* the serial console device */ void __init at91_register_uart(unsigned id, 
unsigned portnr, unsigned pins) { struct platform_device *pdev; struct atmel_uart_data *pdata; switch (id) { case 0: /* DBGU */ pdev = &at91rm9200_dbgu_device; configure_dbgu_pins(); break; case AT91RM9200_ID_US0: pdev = &at91rm9200_uart0_device; configure_usart0_pins(pins); break; case AT91RM9200_ID_US1: pdev = &at91rm9200_uart1_device; configure_usart1_pins(pins); break; case AT91RM9200_ID_US2: pdev = &at91rm9200_uart2_device; configure_usart2_pins(pins); break; case AT91RM9200_ID_US3: pdev = &at91rm9200_uart3_device; configure_usart3_pins(pins); break; default: return; } pdata = pdev->dev.platform_data; pdata->num = portnr; /* update to mapped ID */ if (portnr < ATMEL_MAX_UART) at91_uarts[portnr] = pdev; } void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; at91rm9200_set_console_clock(at91_uarts[portnr]->id); } } void __init at91_add_device_serial(void) { int i; for (i = 0; i < ATMEL_MAX_UART; i++) { if (at91_uarts[i]) platform_device_register(at91_uarts[i]); } if (!atmel_default_console_device) printk(KERN_INFO "AT91: No default serial console defined.\n"); } #else void __init __deprecated at91_init_serial(struct at91_uart_config *config) {} void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} void __init at91_set_serial_console(unsigned portnr) {} void __init at91_add_device_serial(void) {} #endif /* -------------------------------------------------------------------- */ /* * These devices are always present and don't need any board-specific * setup. */ static int __init at91_add_standard_devices(void) { at91_add_device_rtc(); at91_add_device_watchdog(); at91_add_device_tc(); return 0; } arch_initcall(at91_add_standard_devices);
gpl-2.0
utopykzebulon/android_kernel_msm7x30-3.0
fs/proc/uptime.c
1685
1311
#include <linux/fs.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/time.h> #include <linux/kernel_stat.h> #include <asm/cputime.h> static int uptime_proc_show(struct seq_file *m, void *v) { struct timespec uptime; struct timespec idle; cputime64_t idletime; u64 nsec; u32 rem; int i; idletime = 0; for_each_possible_cpu(i) idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); do_posix_clock_monotonic_gettime(&uptime); monotonic_to_bootbased(&uptime); nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; seq_printf(m, "%lu.%02lu %lu.%02lu\n", (unsigned long) uptime.tv_sec, (uptime.tv_nsec / (NSEC_PER_SEC / 100)), (unsigned long) idle.tv_sec, (idle.tv_nsec / (NSEC_PER_SEC / 100))); return 0; } static int uptime_proc_open(struct inode *inode, struct file *file) { return single_open(file, uptime_proc_show, NULL); } static const struct file_operations uptime_proc_fops = { .open = uptime_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_uptime_init(void) { proc_create("uptime", 0, NULL, &uptime_proc_fops); return 0; } module_init(proc_uptime_init);
gpl-2.0
TREX-ROM/kernel_n5_racer
drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
2197
23101
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <mach/msm_memtypes.h> #include "vcd_ddl.h" #include "vcd_ddl_metadata.h" #include "vcd_res_tracker_api.h" static unsigned int first_time; u32 ddl_device_init(struct ddl_init_config *ddl_init_config, void *client_data) { struct ddl_context *ddl_context; u32 status = VCD_S_SUCCESS; void *ptr = NULL; DDL_MSG_HIGH("ddl_device_init"); if ((!ddl_init_config) || (!ddl_init_config->ddl_callback) || (!ddl_init_config->core_virtual_base_addr)) { DDL_MSG_ERROR("ddl_dev_init:Bad_argument"); return VCD_ERR_ILLEGAL_PARM; } ddl_context = ddl_get_context(); if (DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_dev_init:Multiple_init"); return VCD_ERR_ILLEGAL_OP; } if (!DDL_IS_IDLE(ddl_context)) { DDL_MSG_ERROR("ddl_dev_init:Ddl_busy"); return VCD_ERR_BUSY; } memset(ddl_context, 0, sizeof(struct ddl_context)); DDL_BUSY(ddl_context); if (res_trk_get_enable_ion()) { DDL_MSG_LOW("ddl_dev_init:ION framework enabled"); ddl_context->video_ion_client = res_trk_get_ion_client(); if (!ddl_context->video_ion_client) { DDL_MSG_ERROR("ION client create failed"); return VCD_ERR_ILLEGAL_OP; } } ddl_context->ddl_callback = ddl_init_config->ddl_callback; if (ddl_init_config->interrupt_clr) ddl_context->interrupt_clr = ddl_init_config->interrupt_clr; ddl_context->core_virtual_base_addr = ddl_init_config->core_virtual_base_addr; ddl_context->client_data = client_data; ddl_context->ddl_hw_response.arg1 = DDL_INVALID_INTR_STATUS; ddl_context->frame_channel_depth = 
VCD_FRAME_COMMAND_DEPTH; DDL_MSG_LOW("%s() : virtual address of core(%x)\n", __func__, (u32) ddl_init_config->core_virtual_base_addr); vidc_1080p_set_device_base_addr( ddl_context->core_virtual_base_addr); ddl_context->cmd_state = DDL_CMD_INVALID; ddl_client_transact(DDL_INIT_CLIENTS, NULL); ddl_context->fw_memory_size = DDL_FW_INST_GLOBAL_CONTEXT_SPACE_SIZE; if (res_trk_get_firmware_addr(&ddl_context->dram_base_a)) { DDL_MSG_ERROR("firmware allocation failed"); ptr = NULL; } else { ptr = (void *)ddl_context->dram_base_a.virtual_base_addr; } if (!ptr) { DDL_MSG_ERROR("Memory Aocation Failed for FW Base"); status = VCD_ERR_ALLOC_FAIL; } else { DDL_MSG_LOW("%s() : physical address of base(%x)\n", __func__, (u32) ddl_context->dram_base_a.\ align_physical_addr); ddl_context->dram_base_b.align_physical_addr = ddl_context->dram_base_a.align_physical_addr; ddl_context->dram_base_b.align_virtual_addr = ddl_context->dram_base_a.align_virtual_addr; } if (!status) { ddl_context->metadata_shared_input.mem_type = DDL_CMD_MEM; ptr = ddl_pmem_alloc(&ddl_context->metadata_shared_input, DDL_METADATA_TOTAL_INPUTBUFSIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ptr) { DDL_MSG_ERROR("ddl_device_init: metadata alloc fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status && !ddl_fw_init(&ddl_context->dram_base_a)) { DDL_MSG_ERROR("ddl_dev_init:fw_init_failed"); status = VCD_ERR_ALLOC_FAIL; } if (!status) { ddl_context->cmd_state = DDL_CMD_DMA_INIT; ddl_vidc_core_init(ddl_context); } else { ddl_release_context_buffers(ddl_context); DDL_IDLE(ddl_context); } return status; } u32 ddl_device_release(void *client_data) { struct ddl_context *ddl_context; DDL_MSG_HIGH("ddl_device_release"); ddl_context = ddl_get_context(); if (!DDL_IS_IDLE(ddl_context)) { DDL_MSG_ERROR("ddl_dev_rel:Ddl_busy"); return VCD_ERR_BUSY; } if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_dev_rel:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_client_transact(DDL_ACTIVE_CLIENT, NULL)) { 
DDL_MSG_ERROR("ddl_dev_rel:Client_present_err"); return VCD_ERR_CLIENT_PRESENT; } DDL_BUSY(ddl_context); ddl_context->device_state = DDL_DEVICE_NOTINIT; ddl_context->client_data = client_data; ddl_context->cmd_state = DDL_CMD_INVALID; ddl_vidc_core_term(ddl_context); DDL_MSG_LOW("FW_ENDDONE"); ddl_context->core_virtual_base_addr = NULL; ddl_release_context_buffers(ddl_context); ddl_context->video_ion_client = NULL; DDL_IDLE(ddl_context); return VCD_S_SUCCESS; } u32 ddl_open(u32 **ddl_handle, u32 decoding) { struct ddl_context *ddl_context; struct ddl_client_context *ddl; void *ptr; u32 status; DDL_MSG_HIGH("ddl_open"); if (!ddl_handle) { DDL_MSG_ERROR("ddl_open:Bad_handle"); return VCD_ERR_BAD_HANDLE; } ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_open:Not_inited"); return VCD_ERR_ILLEGAL_OP; } status = ddl_client_transact(DDL_GET_CLIENT, &ddl); if (status) { DDL_MSG_ERROR("ddl_open:Client_trasac_failed"); return status; } if (res_trk_check_for_sec_session()) ddl->shared_mem[0].mem_type = DDL_CMD_MEM; else ddl->shared_mem[0].mem_type = DDL_FW_MEM; ptr = ddl_pmem_alloc(&ddl->shared_mem[0], DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0); if (!ptr) status = VCD_ERR_ALLOC_FAIL; if (!status && ddl_context->frame_channel_depth == VCD_DUAL_FRAME_COMMAND_CHANNEL) { if (res_trk_check_for_sec_session()) ddl->shared_mem[1].mem_type = DDL_CMD_MEM; else ddl->shared_mem[1].mem_type = DDL_FW_MEM; ptr = ddl_pmem_alloc(&ddl->shared_mem[1], DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0); if (!ptr) { ddl_pmem_free(&ddl->shared_mem[0]); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { memset(ddl->shared_mem[0].align_virtual_addr, 0, DDL_FW_AUX_HOST_CMD_SPACE_SIZE); if (ddl_context->frame_channel_depth == VCD_DUAL_FRAME_COMMAND_CHANNEL) { memset(ddl->shared_mem[1].align_virtual_addr, 0, DDL_FW_AUX_HOST_CMD_SPACE_SIZE); } DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_OPEN", ddl_get_state_string(ddl->client_state)); ddl->client_state = DDL_CLIENT_OPEN; 
ddl->codec_data.hdr.decoding = decoding; ddl->decoding = decoding; if (!res_trk_check_for_sec_session()) ddl_set_default_meta_data_hdr(ddl); ddl_set_initial_default_values(ddl); *ddl_handle = (u32 *) ddl; } else { ddl_pmem_free(&ddl->shared_mem[0]); if (ddl_context->frame_channel_depth == VCD_DUAL_FRAME_COMMAND_CHANNEL) ddl_pmem_free(&ddl->shared_mem[1]); ddl_client_transact(DDL_FREE_CLIENT, &ddl); } return status; } u32 ddl_close(u32 **ddl_handle) { struct ddl_context *ddl_context; struct ddl_client_context **pp_ddl = (struct ddl_client_context **)ddl_handle; DDL_MSG_HIGH("ddl_close"); if (!pp_ddl || !*pp_ddl) { DDL_MSG_ERROR("ddl_close:Bad_handle"); return VCD_ERR_BAD_HANDLE; } ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_close:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (!DDLCLIENT_STATE_IS(*pp_ddl, DDL_CLIENT_OPEN)) { DDL_MSG_ERROR("ddl_close:Not_in_open_state"); return VCD_ERR_ILLEGAL_OP; } ddl_pmem_free(&(*pp_ddl)->shared_mem[0]); if (ddl_context->frame_channel_depth == VCD_DUAL_FRAME_COMMAND_CHANNEL) ddl_pmem_free(&(*pp_ddl)->shared_mem[1]); DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_INVALID", ddl_get_state_string((*pp_ddl)->client_state)); (*pp_ddl)->client_state = DDL_CLIENT_INVALID; ddl_codec_type_transact(*pp_ddl, true, (enum vcd_codec)0); ddl_client_transact(DDL_FREE_CLIENT, pp_ddl); return VCD_S_SUCCESS; } u32 ddl_encode_start(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; struct ddl_encoder_data *encoder; void *ptr; u32 status = VCD_S_SUCCESS; DDL_MSG_HIGH("ddl_encode_start"); if (first_time < 2) { ddl_reset_core_time_variables(ENC_OP_TIME); first_time++; } ddl_set_core_start_time(__func__, ENC_OP_TIME); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_enc_start:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { 
DDL_MSG_ERROR("ddl_enc_start:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { DDL_MSG_ERROR("ddl_enc_start:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { DDL_MSG_ERROR("ddl_enc_start:Not_opened"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_encoder_ready_to_start(ddl)) { DDL_MSG_ERROR("ddl_enc_start:Err_param_settings"); return VCD_ERR_ILLEGAL_OP; } encoder = &ddl->codec_data.encoder; if (DDL_IS_LTR_ENABLED(encoder)) { DDL_MSG_HIGH("LTR enabled, mode %u count %u", (u32)encoder->ltr_control.ltrmode.ltr_mode, (u32)encoder->ltr_control.ltr_count); status = ddl_allocate_ltr_list(&encoder->ltr_control); if (status) { DDL_MSG_ERROR("%s: allocate ltr list failed", __func__); return status; } else { ddl_clear_ltr_list(&encoder->ltr_control, false); } encoder->num_references_for_p_frame = 2; encoder->ltr_control.callback_reqd = false; encoder->ltr_control.curr_ltr_id = (u32)DDL_LTR_FRAME_START_ID; DDL_MSG_HIGH("num_ref_for_p_frames %u, curr_ltr_id = %u", (u32)encoder->num_references_for_p_frame, (u32)encoder->ltr_control.curr_ltr_id); } status = ddl_allocate_enc_hw_buffers(ddl); if (status) return status; #ifdef DDL_BUF_LOG ddl_list_buffers(ddl); #endif encoder->seq_header.mem_type = DDL_MM_MEM; ptr = ddl_pmem_alloc(&encoder->seq_header, DDL_ENC_SEQHEADER_SIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ptr) { ddl_free_enc_hw_buffers(ddl); DDL_MSG_ERROR("ddl_enc_start:Seq_hdr_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } msm_ion_do_cache_op(ddl_context->video_ion_client, encoder->seq_header.alloc_handle, encoder->seq_header.virtual_base_addr, encoder->seq_header.buffer_size, ION_IOC_CLEAN_INV_CACHES); if (encoder->slice_delivery_info.enable) { DDL_MSG_LOW("%s: slice mode allocate memory for struct\n", __func__); ptr = ddl_pmem_alloc(&encoder->batch_frame.slice_batch_in, DDL_ENC_SLICE_BATCH_INPSTRUCT_SIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (ptr) { ptr = ddl_pmem_alloc( &encoder->batch_frame.slice_batch_out, 
/*
 * Continuation of ddl_encode_start(): remaining arguments of the slice-batch
 * output allocation, unified failure cleanup (frees both slice-batch buffers,
 * the encoder HW buffers and the sequence header), then command-channel
 * acquisition and ddl_vidc_channel_set().  ddl_decode_start() begins on this
 * same physical line: it validates context/handle/client state, rejects a
 * sequence header with a NULL pointer or zero length, allocates decoder HW
 * buffers, takes the command channel, and records whether an in-band header
 * was supplied (header_in_start).
 */
DDL_ENC_SLICE_BATCH_OUTSTRUCT_SIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); } if (!ptr) { ddl_pmem_free(&encoder->batch_frame.slice_batch_in); ddl_pmem_free(&encoder->batch_frame.slice_batch_out); ddl_free_enc_hw_buffers(ddl); ddl_pmem_free(&encoder->seq_header); DDL_MSG_ERROR("ddlEncStart:SeqHdrAllocFailed"); return VCD_ERR_ALLOC_FAIL; } } if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; ddl_vidc_channel_set(ddl); return status; } u32 ddl_decode_start(u32 *ddl_handle, struct vcd_sequence_hdr *header, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; struct ddl_decoder_data *decoder; u32 status = VCD_S_SUCCESS; DDL_MSG_HIGH("ddl_decode_start"); ddl_reset_core_time_variables(DEC_OP_TIME); ddl_reset_core_time_variables(DEC_IP_TIME); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_dec_start:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { DDL_MSG_ERROR("ddl_dec_start:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { DDL_MSG_ERROR("ddl_dec_start:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { DDL_MSG_ERROR("ddl_dec_start:Not_in_opened_state"); return VCD_ERR_ILLEGAL_OP; } if ((header) && ((!header->sequence_header_len) || (!header->sequence_header))) { DDL_MSG_ERROR("ddl_dec_start:Bad_param_seq_header"); return VCD_ERR_ILLEGAL_PARM; } if (!ddl_decoder_ready_to_start(ddl, header)) { DDL_MSG_ERROR("ddl_dec_start:Err_param_settings"); return VCD_ERR_ILLEGAL_OP; } decoder = &ddl->codec_data.decoder; status = ddl_allocate_dec_hw_buffers(ddl); if (status) return status; #ifdef DDL_BUF_LOG ddl_list_buffers(ddl); #endif if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; if (header) { decoder->header_in_start = true; decoder->decode_config = *header; } else { decoder->header_in_start = false; 
/*
 * ddl_decode_start() tail: no in-band header, so clear the stored header
 * length before kicking channel-set.  ddl_decode_frame() follows: validates
 * the input bitstream buffer (a NULL/empty payload is only allowed with the
 * EOS flag), requires WAIT_FOR_FRAME / WAIT_FOR_INITCODEC / WAIT_FOR_DPB
 * client state, then either runs the frame directly, sets DPBs, or (on the
 * next line) performs init-codec with the frame used as sequence header.
 */
decoder->decode_config.sequence_header_len = 0; } ddl_vidc_channel_set(ddl); return status; } u32 ddl_decode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_bits, void *client_data) { u32 vcd_status = VCD_S_SUCCESS; struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; struct ddl_decoder_data *decoder; DDL_MSG_MED("ddl_decode_frame"); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_dec_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { DDL_MSG_ERROR("ddl_dec_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { DDL_MSG_ERROR("ddl_dec_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_bits || ((!input_bits->vcd_frm.physical || !input_bits->vcd_frm.data_len) && (!(VCD_FRAME_FLAG_EOS & input_bits->vcd_frm.flags)))) { DDL_MSG_ERROR("ddl_dec_frame:Bad_input_param"); return VCD_ERR_ILLEGAL_PARM; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) { DDL_MSG_ERROR("Dec_frame:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } decoder = &(ddl->codec_data.decoder); if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) { DDL_MSG_ERROR("ddl_dec_frame:Dpbs_requied"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; ddl->input_frame = *input_bits; if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) ddl_vidc_decode_frame_run(ddl); else { if (!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) { DDL_MSG_ERROR("ddl_dec_frame:Dpbs_requied"); vcd_status = VCD_ERR_ILLEGAL_OP; } else if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) { vcd_status = ddl_vidc_decode_set_buffers(ddl); if (vcd_status) ddl_release_command_channel(ddl_context, ddl->command_channel); } else if 
(DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC)) { if (decoder->codec.codec == VCD_CODEC_DIVX_3) { if ((!decoder->client_frame_size.width) || (!decoder->client_frame_size.height)) return VCD_ERR_ILLEGAL_OP; } ddl->codec_data.decoder.decode_config.sequence_header = ddl->input_frame.vcd_frm.physical; ddl->codec_data.decoder.decode_config.sequence_header_len = ddl->input_frame.vcd_frm.data_len; ddl_vidc_decode_init_codec(ddl); } else { DDL_MSG_ERROR("Dec_frame:Wrong_state"); vcd_status = VCD_ERR_ILLEGAL_OP; } if (vcd_status) DDL_IDLE(ddl_context); } return vcd_status; } u32 ddl_encode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_frame, struct ddl_frame_data_tag *output_bit, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; struct ddl_encoder_data *encoder = &ddl->codec_data.encoder; u32 vcd_status = VCD_S_SUCCESS; if (encoder->slice_delivery_info.enable) { return ddl_encode_frame_batch(ddl_handle, input_frame, output_bit, 1, encoder->slice_delivery_info.num_slices, client_data); } ddl_set_core_start_time(__func__, ENC_OP_TIME); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_enc_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { DDL_MSG_ERROR("ddl_enc_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { DDL_MSG_ERROR("ddl_enc_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_frame || !input_frame->vcd_frm.physical || !input_frame->vcd_frm.data_len) { DDL_MSG_ERROR("ddl_enc_frame:Bad_input_params"); return VCD_ERR_ILLEGAL_PARM; } if ((((u32) input_frame->vcd_frm.physical + input_frame->vcd_frm.offset) & (DDL_STREAMBUF_ALIGN_GUARD_BYTES))) { DDL_MSG_ERROR("ddl_enc_frame:Un_aligned_yuv_start_address"); return VCD_ERR_ILLEGAL_PARM; } if (!output_bit || !output_bit->vcd_frm.physical || !output_bit->vcd_frm.alloc_len) { DDL_MSG_ERROR("ddl_enc_frame:Bad_output_params"); return 
VCD_ERR_ILLEGAL_PARM; } if ((ddl->codec_data.encoder.output_buf_req.sz + output_bit->vcd_frm.offset) > output_bit->vcd_frm.alloc_len) DDL_MSG_ERROR("ddl_enc_frame:offset_large," "Exceeds_min_buf_size"); if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { DDL_MSG_ERROR("ddl_enc_frame:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; ddl->input_frame = *input_frame; ddl->output_frame = *output_bit; if (ddl->codec_data.encoder.i_period.b_frames > 0) { if (!ddl->b_count) { ddl->first_output_frame = *output_bit; ddl->b_count++; } else if (ddl->codec_data.encoder.i_period.b_frames >= ddl->b_count) { ddl->extra_output_frame[ddl->b_count-1] = *output_bit; ddl->output_frame = ddl->first_output_frame; ddl->b_count++; } } ddl_insert_input_frame_to_pool(ddl, input_frame); if (!vcd_status) ddl_vidc_encode_frame_run(ddl); else DDL_MSG_ERROR("insert to frame pool failed %u", vcd_status); return vcd_status; } u32 ddl_encode_frame_batch(u32 *ddl_handle, struct ddl_frame_data_tag *input_frame, struct ddl_frame_data_tag *output_bit, u32 num_in_frames, u32 num_out_frames, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; u32 vcd_status = VCD_S_SUCCESS; struct ddl_encoder_data *encoder; DDL_MSG_LOW("ddl_encode_frame_batch"); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_enc_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { DDL_MSG_ERROR("ddl_enc_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { DDL_MSG_ERROR("ddl_enc_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_frame || !input_frame->vcd_frm.physical || !input_frame->vcd_frm.data_len) { DDL_MSG_ERROR("ddl_enc_frame:Bad_input_params"); return VCD_ERR_ILLEGAL_PARM; } if ((((u32) input_frame->vcd_frm.physical + input_frame->vcd_frm.offset) & 
/*
 * ddl_encode_frame_batch() tail: remaining buffer validation, then in
 * slice-delivery mode it copies the num_out_frames output descriptors into
 * batch_frame, inserts the input frame into the pool (return value checked,
 * unlike the non-batch path) and runs the slice-batch encode.
 * ddl_decode_end() follows: validates context/handle/state and sends
 * channel-end to tear down the decode session.
 */
(DDL_STREAMBUF_ALIGN_GUARD_BYTES))) { DDL_MSG_ERROR("ddl_enc_frame:Un_aligned_yuv_start_address"); return VCD_ERR_ILLEGAL_PARM; } if (!output_bit || !output_bit->vcd_frm.physical || !output_bit->vcd_frm.alloc_len) { DDL_MSG_ERROR("ddl_enc_frame:Bad_output_params"); return VCD_ERR_ILLEGAL_PARM; } if ((ddl->codec_data.encoder.output_buf_req.sz + output_bit->vcd_frm.offset) > output_bit->vcd_frm.alloc_len) DDL_MSG_ERROR("ddl_enc_frame:offset_large," "Exceeds_min_buf_size"); if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { DDL_MSG_ERROR("ddl_enc_frame:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; encoder = &ddl->codec_data.encoder; if (encoder->slice_delivery_info.enable) { DDL_MEMCPY((void *)&(encoder->batch_frame.output_frame[0]), (void *)output_bit, sizeof(struct ddl_frame_data_tag) * num_out_frames); encoder->batch_frame.num_output_frames = num_out_frames; ddl->input_frame = *input_frame; vcd_status = ddl_insert_input_frame_to_pool(ddl, input_frame); if (!vcd_status) ddl_vidc_encode_slice_batch_run(ddl); else DDL_MSG_ERROR("insert to frame pool failed %u", vcd_status); } return vcd_status; } u32 ddl_decode_end(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; DDL_MSG_HIGH("ddl_decode_end"); ddl_reset_core_time_variables(DEC_OP_TIME); ddl_reset_core_time_variables(DEC_IP_TIME); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_dec_end:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { DDL_MSG_ERROR("ddl_dec_end:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { DDL_MSG_ERROR("ddl_dec_end:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) && 
/*
 * ddl_decode_end() tail (FAVIDC_ERROR is also an acceptable state), then
 * ddl_encode_end() — the encoder-side session teardown, same pattern — and
 * ddl_reset_hw(): marks the command state invalid, pulses the two-stage
 * 1080p software reset when the core is mapped, releases every client's
 * internal buffers via ddl_client_transact(DDL_FREE_CLIENT, ...), frees the
 * context buffers and zeroes the whole ddl_context.
 */
!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FAVIDC_ERROR)) { DDL_MSG_ERROR("ddl_dec_end:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; ddl_vidc_channel_end(ddl); return VCD_S_SUCCESS; } u32 ddl_encode_end(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; struct ddl_context *ddl_context; DDL_MSG_HIGH("ddl_encode_end"); ddl_reset_core_time_variables(ENC_OP_TIME); ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { DDL_MSG_ERROR("ddl_enc_end:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { DDL_MSG_ERROR("ddl_enc_end:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { DDL_MSG_ERROR("ddl_enc_end:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FAVIDC_ERROR)) { DDL_MSG_ERROR("ddl_enc_end:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_take_command_channel(ddl_context, ddl, client_data)) return VCD_ERR_BUSY; ddl_vidc_channel_end(ddl); return VCD_S_SUCCESS; } u32 ddl_reset_hw(u32 mode) { struct ddl_context *ddl_context; struct ddl_client_context *ddl; u32 i; DDL_MSG_HIGH("ddl_reset_hw"); DDL_MSG_LOW("ddl_reset_hw:called"); ddl_context = ddl_get_context(); ddl_context->cmd_state = DDL_CMD_INVALID; DDL_BUSY(ddl_context); if (ddl_context->core_virtual_base_addr) { vidc_1080p_do_sw_reset(VIDC_1080P_RESET_IN_SEQ_FIRST_STAGE); msleep(DDL_SW_RESET_SLEEP); vidc_1080p_do_sw_reset(VIDC_1080P_RESET_IN_SEQ_SECOND_STAGE); msleep(DDL_SW_RESET_SLEEP); ddl_context->core_virtual_base_addr = NULL; } ddl_context->device_state = DDL_DEVICE_NOTINIT; for (i = 0; i < VCD_MAX_NO_CLIENT; i++) { ddl = ddl_context->ddl_clients[i]; ddl_context->ddl_clients[i] = NULL; if (ddl) { ddl_release_client_internal_buffers(ddl); ddl_client_transact(DDL_FREE_CLIENT, 
/* ddl_reset_hw() tail: free each client, release context buffers, zero the
 * context.  NOTE(review): the 'mode' parameter is never read in the visible
 * body — confirm against callers whether it is intentionally unused. */
&ddl); } } ddl_release_context_buffers(ddl_context); memset(ddl_context, 0, sizeof(struct ddl_context)); return true; }
gpl-2.0
NamelessRom/android_kernel_google_msm
net/bluetooth/l2cap_sock.c
2197
31604
/*
 * Start of the embedded net/bluetooth/l2cap_sock.c: GPLv2 header, includes,
 * and l2cap_sock_timeout() — the sk_timer callback.  If the socket is owned
 * by user context it re-arms itself HZ/5 later; otherwise it picks an error
 * reason (ECONNREFUSED for CONNECTED/CONFIG, or for CONNECT above SDP
 * security level; ETIMEDOUT otherwise) and closes/kills the socket.  The
 * line below ends inside the "sk is owned by user" comment, which continues
 * on the next physical line.
 */
/* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2011-2012 The Linux Foundation. All rights reserved. Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> Copyright (C) 2010 Google Inc. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth L2CAP sockets. */ #include <linux/interrupt.h> #include <linux/module.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/smp.h> #include <net/bluetooth/amp.h> /* ---- L2CAP timers ---- */ static void l2cap_sock_timeout(unsigned long arg) { struct sock *sk = (struct sock *) arg; int reason; BT_DBG("sock %p state %d", sk, sk->sk_state); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* sk is owned by user. 
Try again later */ l2cap_sock_set_timer(sk, HZ / 5); bh_unlock_sock(sk); sock_put(sk); return; } if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG) reason = ECONNREFUSED; else if (sk->sk_state == BT_CONNECT && l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) reason = ECONNREFUSED; else reason = ETIMEDOUT; __l2cap_sock_close(sk, reason); bh_unlock_sock(sk); l2cap_sock_kill(sk); sock_put(sk); } void l2cap_sock_set_timer(struct sock *sk, long timeout) { BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout); sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); } void l2cap_sock_clear_timer(struct sock *sk) { BT_DBG("sock %p state %d", sk, sk->sk_state); sk_stop_timer(sk, &sk->sk_timer); } int l2cap_sock_le_params_valid(struct bt_le_params *le_params) { if (!le_params || le_params->latency > BT_LE_LATENCY_MAX || le_params->scan_window > BT_LE_SCAN_WINDOW_MAX || le_params->scan_interval < BT_LE_SCAN_INTERVAL_MIN || le_params->scan_window > le_params->scan_interval || le_params->interval_min < BT_LE_CONN_INTERVAL_MIN || le_params->interval_max > BT_LE_CONN_INTERVAL_MAX || le_params->interval_min > le_params->interval_max || le_params->supervision_timeout < BT_LE_SUP_TO_MIN || le_params->supervision_timeout > BT_LE_SUP_TO_MAX) { return 0; } return 1; } int l2cap_sock_le_conn_update_params_valid(struct bt_le_params *le_params) { if (!le_params || le_params->latency > BT_LE_LATENCY_MAX || le_params->interval_min < BT_LE_CONN_INTERVAL_MIN || le_params->interval_max > BT_LE_CONN_INTERVAL_MAX || le_params->interval_min > le_params->interval_max || le_params->supervision_timeout < BT_LE_SUP_TO_MIN || le_params->supervision_timeout > BT_LE_SUP_TO_MAX) { return 0; } return 1; } static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) { struct sock *sk; struct hlist_node *node; sk_for_each(sk, node, &l2cap_sk_list.head) if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src)) goto found; sk = NULL; found: return sk; } static int 
/*
 * l2cap_sock_bind(): copies the sockaddr (cid and psm are mutually
 * exclusive), requires BT_OPEN, enforces odd PSM with clear upper-byte lsb,
 * restricts PSMs below 0x1001 to CAP_NET_BIND_SERVICE, rejects an address
 * already bound to the same PSM, then records src/psm/sport (and scid for
 * fixed channels) under l2cap_sk_list.lock and moves to BT_BOUND.  PSM
 * 0x0001/0x0003 get BT_SECURITY_SDP.  l2cap_sock_connect() starts at the
 * end of this line.
 */
l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid && la.l2_psm) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (la.l2_psm) { __u16 psm = __le16_to_cpu(la.l2_psm); /* PSM must be odd and lsb of upper byte must be 0 */ if ((psm & 0x0101) != 0x0001) { err = -EINVAL; goto done; } /* Restrict usage of well-known PSMs */ if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) { err = -EACCES; goto done; } } write_lock_bh(&l2cap_sk_list.lock); if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; l2cap_pi(sk)->sport = la.l2_psm; sk->sk_state = BT_BOUND; if (__le16_to_cpu(la.l2_psm) == 0x0001 || __le16_to_cpu(la.l2_psm) == 0x0003) l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; } if (la.l2_cid) l2cap_pi(sk)->scid = la.l2_cid; write_unlock_bh(&l2cap_sk_list.lock); done: release_sock(sk); return err; } static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p type %d mode %d state %d", sk, sk->sk_type, l2cap_pi(sk)->mode, sk->sk_state); if (!addr || alen < sizeof(addr->sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid && la.l2_psm) return -EINVAL; lock_sock(sk); if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) && !(la.l2_psm || la.l2_cid || l2cap_pi(sk)->fixed_channel)) { err = -EINVAL; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case 
/*
 * connect() continued: ERTM/streaming only when ERTM is not disabled; the
 * state switch either waits on an in-flight connect, rejects EISCONN, or
 * proceeds from OPEN/BOUND; validates the PSM, stores dst/psm/dcid and
 * calls l2cap_do_connect(), then blocks in bt_sock_wait_state() until
 * BT_CONNECTED (honoring O_NONBLOCK via sock_sndtimeo).
 * l2cap_sock_listen() follows: autobinds a PSM from the 0x1001-0x10ff odd
 * range when none is bound, then enters BT_LISTEN.
 */
L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ goto wait; case BT_CONNECTED: /* Already connected */ err = -EISCONN; goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* PSM must be odd and lsb of upper byte must be 0 */ if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !l2cap_pi(sk)->fixed_channel && sk->sk_type != SOCK_RAW && !la.l2_cid) { BT_DBG("Bad PSM 0x%x", (int)__le16_to_cpu(la.l2_psm)); err = -EINVAL; goto done; } /* Set destination address and psm */ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; l2cap_pi(sk)->dcid = la.l2_cid; err = l2cap_do_connect(sk); if (err) goto done; wait: err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: if (err) BT_ERR("failed %d", err); release_sock(sk); return err; } static int l2cap_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) || sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->scid) { bdaddr_t *src = &bt_sk(sk)->src; u16 psm; err = -EINVAL; write_lock_bh(&l2cap_sk_list.lock); for (psm = 0x1001; psm < 0x1100; psm += 2) if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) { l2cap_pi(sk)->psm = cpu_to_le16(psm); l2cap_pi(sk)->sport = cpu_to_le16(psm); err = 0; break; } write_unlock_bh(&l2cap_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); 
/*
 * l2cap_sock_accept(): classic wake-one accept loop — waits on sk_sleep with
 * an exclusive waitqueue entry, dequeuing via bt_accept_dequeue(), handling
 * timeout (EAGAIN), listener state change (EBADFD) and signals.
 * l2cap_sock_getname() reports psm/dcid for the peer and sport/scid locally.
 * l2cap_sock_getsockopt_old() starts afterwards, serving the SOL_L2CAP
 * options L2CAP_OPTIONS / L2CAP_LM / L2CAP_CONNINFO.
 */
return err; } static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; goto done; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). */ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(nsk = bt_accept_dequeue(sk, newsock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { err = -EAGAIN; break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); if (peer) { la->l2_psm = l2cap_pi(sk)->psm; bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid); } else { la->l2_psm = l2cap_pi(sk)->sport; bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid); } return 0; } static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct l2cap_options opts; struct l2cap_conninfo cinfo; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case L2CAP_OPTIONS: memset(&opts, 0, sizeof(opts)); opts.imtu = l2cap_pi(sk)->imtu; opts.omtu = 
/*
 * getsockopt_old() continued: L2CAP_LM maps sec_level plus
 * role_switch/force_reliable/flushable back to L2CAP_LM_* flag bits;
 * L2CAP_CONNINFO requires a connected (or deferred-setup) socket.
 * l2cap_sock_getsockopt() then handles SOL_BLUETOOTH: BT_SECURITY,
 * BT_DEFER_SETUP, BT_POWER, BT_AMP_POLICY and BT_LE_PARAMS.
 * NOTE(review): 'cinfo' is copied to user without a prior memset —
 * confirm padding cannot leak kernel stack here.
 */
l2cap_pi(sk)->omtu; opts.flush_to = l2cap_pi(sk)->flush_to; opts.mode = l2cap_pi(sk)->mode; opts.fcs = l2cap_pi(sk)->fcs; opts.max_tx = l2cap_pi(sk)->max_tx; opts.txwin_size = l2cap_pi(sk)->tx_win; len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) err = -EFAULT; break; case L2CAP_LM: switch (l2cap_pi(sk)->sec_level) { case BT_SECURITY_LOW: opt = L2CAP_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT; break; case BT_SECURITY_HIGH: opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE; break; default: opt = 0; break; } if (l2cap_pi(sk)->role_switch) opt |= L2CAP_LM_MASTER; if (l2cap_pi(sk)->force_reliable) opt |= L2CAP_LM_RELIABLE; if (l2cap_pi(sk)->flushable) opt |= L2CAP_LM_FLUSHABLE; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case L2CAP_CONNINFO: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup)) { err = -ENOTCONN; break; } cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct bt_security sec; struct bt_power pwr; int len, err = 0; BT_DBG("sk %p", sk); if (level == SOL_L2CAP) return l2cap_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_RAW) { err = -EINVAL; break; } memset(&sec, 0, sizeof(sec)); sec.level = l2cap_pi(sk)->sec_level; if (sk->sk_state == BT_CONNECTED) { sec.key_size = 
/*
 * getsockopt() continued: connected sockets report the live hcon key size
 * and security level; BT_LE_PARAMS copies bt_sk->le_params for LE-data
 * channels only.  l2cap_sock_setsockopt_old() begins afterwards: for
 * L2CAP_OPTIONS on a connected LE socket only the MTUs (>= LE default) may
 * change; otherwise the option struct is only writable before connection.
 */
l2cap_pi(sk)->conn->hcon->enc_key_size; sec.level = l2cap_pi(sk)->conn->hcon->sec_level; } len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) err = -EFAULT; break; case BT_POWER: if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_RAW) { err = -EINVAL; break; } pwr.force_active = l2cap_pi(sk)->force_active; len = min_t(unsigned int, len, sizeof(pwr)); if (copy_to_user(optval, (char *) &pwr, len)) err = -EFAULT; break; case BT_AMP_POLICY: if (put_user(l2cap_pi(sk)->amp_pref, (u32 __user *) optval)) err = -EFAULT; break; case BT_LE_PARAMS: if (l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) { err = -EINVAL; break; } if (copy_to_user(optval, (char *) &bt_sk(sk)->le_params, sizeof(bt_sk(sk)->le_params))) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_options opts; int len, le_sock, err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); le_sock = l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA; switch (optname) { case L2CAP_OPTIONS: if (sk->sk_state == BT_CONNECTED && !le_sock) { err = -EINVAL; break; } opts.imtu = l2cap_pi(sk)->imtu; opts.omtu = l2cap_pi(sk)->omtu; opts.flush_to = l2cap_pi(sk)->flush_to; opts.mode = l2cap_pi(sk)->mode; opts.fcs = l2cap_pi(sk)->fcs; opts.max_tx = l2cap_pi(sk)->max_tx; opts.txwin_size = l2cap_pi(sk)->tx_win; len = min_t(unsigned int, sizeof(opts), optlen); if (copy_from_user((char *) &opts, optval, len)) { err = -EFAULT; break; } if ((opts.imtu || opts.omtu) && le_sock && (sk->sk_state == BT_CONNECTED)) { if (opts.imtu >= 
/*
 * setsockopt_old() continued: validates txwin_size (1..extended max), maps
 * the requested mode (streaming pins CONF_STATE2_DEVICE so there is no
 * fallback; ERTM only when enabled), then commits all option fields.
 * L2CAP_LM translates L2CAP_LM_* flag bits into
 * sec_level/role_switch/force_reliable/flushable.
 * l2cap_sock_setsockopt() (SOL_BLUETOOTH) starts near the end of the line.
 */
L2CAP_LE_DEFAULT_MTU) l2cap_pi(sk)->imtu = opts.imtu; if (opts.omtu >= L2CAP_LE_DEFAULT_MTU) l2cap_pi(sk)->omtu = opts.omtu; if (opts.imtu < L2CAP_LE_DEFAULT_MTU || opts.omtu < L2CAP_LE_DEFAULT_MTU) err = -EINVAL; break; } if (opts.txwin_size < 1 || opts.txwin_size > L2CAP_TX_WIN_MAX_EXTENDED) { err = -EINVAL; break; } l2cap_pi(sk)->mode = opts.mode; switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE; break; case L2CAP_MODE_STREAMING: if (!disable_ertm) { /* No fallback to ERTM or Basic mode */ l2cap_pi(sk)->conf_state |= L2CAP_CONF_STATE2_DEVICE; break; } err = -EINVAL; break; case L2CAP_MODE_ERTM: if (!disable_ertm) break; /* fall through */ default: err = -EINVAL; break; } l2cap_pi(sk)->imtu = opts.imtu; l2cap_pi(sk)->omtu = opts.omtu; l2cap_pi(sk)->fcs = opts.fcs; l2cap_pi(sk)->max_tx = opts.max_tx; l2cap_pi(sk)->tx_win = opts.txwin_size; l2cap_pi(sk)->flush_to = opts.flush_to; break; case L2CAP_LM: if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } if (opt & L2CAP_LM_AUTH) l2cap_pi(sk)->sec_level = BT_SECURITY_LOW; if (opt & L2CAP_LM_ENCRYPT) l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM; if (opt & L2CAP_LM_SECURE) l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH; l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER); l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE); l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; struct bt_power pwr; struct bt_le_params le_params; struct l2cap_conn *conn; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (level == SOL_L2CAP) return l2cap_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM && sk->sk_type != 
/*
 * setsockopt() body: BT_SECURITY validates the level and, on an outgoing LE
 * link, triggers smp_conn_security(); BT_DEFER_SETUP only in BOUND/LISTEN;
 * BT_POWER stores force_active; BT_AMP_POLICY is limited to ERTM/streaming
 * modes and may kick l2cap_amp_move_init() on a connected channel;
 * BT_FLUSHABLE stores the flag; BT_LE_PARAMS caches the parameters and, on
 * a live outgoing LE connection (validated by
 * l2cap_sock_le_conn_update_params_valid()), issues hci_le_conn_update().
 */
SOCK_RAW) { err = -EINVAL; break; } sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); if (copy_from_user((char *) &sec, optval, len)) { err = -EFAULT; break; } if (sec.level < BT_SECURITY_LOW || sec.level > BT_SECURITY_HIGH) { err = -EINVAL; break; } l2cap_pi(sk)->sec_level = sec.level; conn = l2cap_pi(sk)->conn; if (conn && l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) { if (!conn->hcon->out) { err = -EINVAL; break; } if (smp_conn_security(conn, sec.level)) break; err = 0; sk->sk_state = BT_CONFIG; } break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } bt_sk(sk)->defer_setup = opt; break; case BT_POWER: if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_RAW) { err = -EINVAL; break; } pwr.force_active = 1; len = min_t(unsigned int, sizeof(pwr), optlen); if (copy_from_user((char *) &pwr, optval, len)) { err = -EFAULT; break; } l2cap_pi(sk)->force_active = pwr.force_active; break; case BT_AMP_POLICY: if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } if ((opt > BT_AMP_POLICY_PREFER_AMP) || ((l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) && (l2cap_pi(sk)->mode != L2CAP_MODE_STREAMING))) { err = -EINVAL; break; } l2cap_pi(sk)->amp_pref = (u8) opt; BT_DBG("BT_AMP_POLICY now %d", opt); if ((sk->sk_state == BT_CONNECTED) && (l2cap_pi(sk)->amp_move_role == L2CAP_AMP_MOVE_NONE)) l2cap_amp_move_init(sk); break; case BT_FLUSHABLE: if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } l2cap_pi(sk)->flushable = opt; break; case BT_LE_PARAMS: if (l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) { err = -EINVAL; break; } if (copy_from_user((char *) &le_params, optval, sizeof(struct bt_le_params))) { err = -EFAULT; break; } conn = l2cap_pi(sk)->conn; if (!conn || !conn->hcon || l2cap_pi(sk)->scid != L2CAP_CID_LE_DATA) { memcpy(&bt_sk(sk)->le_params, &le_params, sizeof(le_params)); 
/*
 * BT_LE_PARAMS tail, then l2cap_sock_sendmsg(): connectionless (SOCK_DGRAM)
 * PDUs go straight out; basic mode builds one PDU bounded by omtu; ERTM and
 * streaming segment into seg_queue first (socket lock drops during
 * segmentation, so connection state and amp_id are rechecked — an AMP
 * channel move forces l2cap_resegment_queue()), then hand the queue to
 * l2cap_ertm_tx() or l2cap_strm_tx().  l2cap_sock_recvmsg() (next line)
 * completes deferred-setup handshakes before delegating to the generic
 * bt_sock receive paths; the two lines after this one begin inside
 * comments, so annotations for them live here.
 */
break; } if (!conn->hcon->out || !l2cap_sock_le_conn_update_params_valid( &le_params)) { err = -EINVAL; break; } memcpy(&bt_sk(sk)->le_params, &le_params, sizeof(le_params)); hci_le_conn_update(conn->hcon, le_params.interval_min, le_params.interval_max, le_params.latency, le_params.supervision_timeout); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct l2cap_pinfo *pi = l2cap_pi(sk); struct sk_buff *skb; struct sk_buff_head seg_queue; int err; u8 amp_id; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; goto done; } /* Connectionless channel */ if (sk->sk_type == SOCK_DGRAM) { skb = l2cap_create_connless_pdu(sk, msg, len); if (IS_ERR(skb)) { err = PTR_ERR(skb); } else { l2cap_do_send(sk, skb); err = len; } goto done; } switch (pi->mode) { case L2CAP_MODE_BASIC: /* Check outgoing MTU */ if (len > pi->omtu) { err = -EMSGSIZE; goto done; } /* Create a basic PDU */ skb = l2cap_create_basic_pdu(sk, msg, len); if (IS_ERR(skb)) { err = PTR_ERR(skb); goto done; } l2cap_do_send(sk, skb); err = len; break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: /* Check outgoing MTU */ if (len > pi->omtu) { err = -EMSGSIZE; goto done; } __skb_queue_head_init(&seg_queue); /* Do segmentation before calling in to the state machine, * since it's possible to block while waiting for memory * allocation. 
*/ amp_id = pi->amp_id; err = l2cap_segment_sdu(sk, &seg_queue, msg, len, 0); /* The socket lock is released while segmenting, so check * that the socket is still connected */ if (sk->sk_state != BT_CONNECTED) { __skb_queue_purge(&seg_queue); err = -ENOTCONN; } if (err) { BT_DBG("Error %d, sk_sndbuf %d, sk_wmem_alloc %d", err, sk->sk_sndbuf, atomic_read(&sk->sk_wmem_alloc)); break; } if (pi->amp_id != amp_id) { /* Channel moved while unlocked. Resegment. */ err = l2cap_resegment_queue(sk, &seg_queue); if (err) break; } if (pi->mode != L2CAP_MODE_STREAMING) err = l2cap_ertm_tx(sk, 0, &seg_queue, L2CAP_ERTM_EVENT_DATA_REQUEST); else err = l2cap_strm_tx(sk, &seg_queue); if (!err) err = len; /* If the skbs were not queued for sending, they'll still be in * seg_queue and need to be purged. */ __skb_queue_purge(&seg_queue); break; default: BT_DBG("bad state %1.1x", pi->mode); err = -EBADFD; } done: release_sock(sk); return err; } static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; int err; lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { struct l2cap_conn_rsp rsp; struct l2cap_conn *conn = l2cap_pi(sk)->conn; u8 buf[128]; if (l2cap_pi(sk)->amp_id) { /* Physical link must be brought up before connection * completes. 
*/ amp_accept_physical(conn, l2cap_pi(sk)->amp_id, sk); release_sock(sk); return 0; } sk->sk_state = BT_CONFIG; rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) { release_sock(sk); return 0; } l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(sk, buf), buf); l2cap_pi(sk)->num_conf_req++; release_sock(sk); return 0; } release_sock(sk); if (sock->type == SOCK_STREAM) err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags); else err = bt_sock_recvmsg(iocb, sock, msg, len, flags); if (err >= 0) l2cap_ertm_recv_done(sk); return err; } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. */ void l2cap_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&l2cap_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } /* Must be called on unlocked socket. 
*/
/* Close an L2CAP socket: stop its timer, run the state-machine close under
 * the socket lock, then reap it if it is already zapped and orphaned.
 * Must be called on an unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}

/* Tear down every not-yet-accepted child channel of a listening socket,
 * then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Core close state machine. Caller holds the socket lock.
 * Depending on the current state this either sends a disconnect request,
 * rejects a pending connection, deletes the channel outright, or simply
 * zaps the socket. */
void __l2cap_sock_close(struct sock *sk, int reason)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Connection-oriented channels on ACL links get an orderly
		 * disconnect request; everything else is deleted directly. */
		if ((sk->sk_type == SOCK_SEQPACKET ||
			sk->sk_type == SOCK_STREAM) &&
			conn->hcon->type == ACL_LINK) {
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		/* A connect request is pending from the remote side:
		 * reject it before deleting the channel. */
		if ((sk->sk_type == SOCK_SEQPACKET ||
			sk->sk_type == SOCK_STREAM) &&
			conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* defer_setup means security checks were still in
			 * flight, so report a security block; otherwise the
			 * PSM is simply not being served. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			sk->sk_state = BT_DISCONN;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}

/* shutdown(2) handler. Waits for outstanding ERTM acks, closes the channel,
 * and optionally lingers until the socket reaches BT_CLOSED.
 * Note: 'how' is ignored; shutdown is always full-duplex here. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			/* Drain unacknowledged ERTM frames before closing. */
			err = __l2cap_wait_ack(sk);
			l2cap_ertm_shutdown(sk);
		}

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
					sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}

/* release(2) handler. For LE ATT sockets the matching companion socket
 * (server vs. client direction) is asked to tear down as well. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sock *sk2 = NULL;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	/* If this is an ATT socket, find it's matching server/client */
	if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA)
		sk2 = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
					&bt_sk(sk)->src, &bt_sk(sk)->dst,
					l2cap_pi(sk)->incoming ? 0 : 1);

	/* If matching socket found, request tear down */
	BT_DBG("sock:%p companion:%p", sk, sk2);
	if (sk2)
		l2cap_sock_set_timer(sk2, 1);

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}

/* sk_destruct callback: free queued skbs and ERTM state. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	l2cap_ertm_destruct(sk);
}

/* Reset a configuration-parameter block to its protocol defaults. */
static void set_default_config(struct l2cap_conf_prm *conf_prm)
{
	conf_prm->fcs = L2CAP_FCS_CRC16;
	conf_prm->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}

/* Initialize per-socket L2CAP state. With a parent (accept path) the child
 * inherits the parent's settings; otherwise protocol defaults are used. */
void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p parent %p", sk, parent);

	if (parent) {
		sk->sk_type = parent->sk_type;
		sk->sk_rcvbuf = parent->sk_rcvbuf;
		sk->sk_sndbuf = parent->sk_sndbuf;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->flushable = l2cap_pi(parent)->flushable;
		pi->force_active = l2cap_pi(parent)->force_active;
		pi->amp_pref = l2cap_pi(parent)->amp_pref;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM defaults to ERTM unless disabled via the
		 * module parameter. */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->reconf_state = L2CAP_RECONF_NONE;
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
		pi->flushable = 0;
		pi->force_active = 1;
		pi->amp_pref = BT_AMP_POLICY_REQUIRE_BR_EDR;
	}

	/* Default config options */
	sk->sk_backlog_rcv = l2cap_data_channel;
	pi->ampcon = NULL;
	pi->ampchan = NULL;
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	pi->scid = 0;
	pi->dcid = 0;
	pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
	pi->ack_win = pi->tx_win;
	pi->extended_control = 0;

	pi->local_conf.fcs = pi->fcs;
	pi->local_conf.flush_to = pi->flush_to;

	set_default_config(&pi->remote_conf);

	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
}

static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};

/* Allocate and minimally initialize an L2CAP sock, link it into the
 * global L2CAP socket list. Returns NULL on allocation failure. */
struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
				int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}

/* socket(2) handler: validate the socket type, enforce CAP_NET_RAW for
 * raw sockets, then allocate and initialize the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock,
				int protocol, int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}

const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Register the L2CAP proto and the Bluetooth socket family handler.
 * On partial failure the proto registration is rolled back. */
int __init l2cap_init_sockets(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	BT_ERR("L2CAP socket registration failed");
	proto_unregister(&l2cap_proto);
	return err;
}

/* Undo l2cap_init_sockets(). */
void l2cap_cleanup_sockets(void)
{
	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	proto_unregister(&l2cap_proto);
}
gpl-2.0
defconoi/Unleashed-N5
drivers/input/touchscreen/synaptics/rmi_f19.c
3733
19210
/** * * Synaptics Register Mapped Interface (RMI4) Function $11 support for 2D. * Copyright (c) 2007 - 2011, Synaptics Incorporated * */ /* * This file is licensed under the GPL2 license. * *############################################################################# * GPL * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * *############################################################################# */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/input/rmi_platformdata.h> #include <linux/module.h> #include "rmi.h" #include "rmi_drvr.h" #include "rmi_bus.h" #include "rmi_sensor.h" #include "rmi_function.h" #include "rmi_f19.h" struct f19_instance_data { struct rmi_F19_query *deviceInfo; struct rmi_F19_control *controlRegisters; bool *buttonDown; unsigned char buttonDataBufferSize; unsigned char *buttonDataBuffer; unsigned char *buttonMap; int fn19ControlRegisterSize; int fn19regCountForBitPerButton; int fn19btnUsageandfilterModeOffset; int fn19intEnableOffset; int fn19intEnableLen; int fn19singleBtnCtrlLen; int fn19singleBtnCtrlOffset; int fn19sensorMapCtrlOffset; int fn19sensorMapCtrlLen; int fn19singleBtnSensOffset; int fn19singleBtnSensLen; int fn19globalSensOffset; int fn19globalHystThreshOffset; }; static ssize_t rmi_f19_buttonCount_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t rmi_f19_buttonCount_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); 
DEVICE_ATTR(buttonCount, 0444, rmi_f19_buttonCount_show, rmi_f19_buttonCount_store); /* RO attr */ static ssize_t rmi_f19_buttonMap_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t rmi_f19_buttonMap_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); DEVICE_ATTR(buttonMap, 0664, rmi_f19_buttonMap_show, rmi_f19_buttonMap_store); /* RW attr */ /* * There is no attention function for F19 - it is left NULL * in the function table so it is not called. * */ /* * This reads in a sample and reports the F19 source data to the * input subsystem. It is used for both polling and interrupt driven * operation. This is called a lot so don't put in any informational * printks since they will slow things way down! */ void FN_19_inthandler(struct rmi_function_info *rmifninfo, unsigned int assertedIRQs) { struct rmi_function_device *function_device; struct f19_instance_data *instanceData; int button; instanceData = (struct f19_instance_data *) rmifninfo->fndata; function_device = rmifninfo->function_device; /* Read the button data. */ if (rmi_read_multiple(rmifninfo->sensor, rmifninfo->funcDescriptor.dataBaseAddr, instanceData->buttonDataBuffer, instanceData->buttonDataBufferSize)) { printk(KERN_ERR "%s: Failed to read button data registers.\n", __func__); return; } /* Generate events for buttons that change state. 
*/ for (button = 0; button < instanceData->deviceInfo->buttonCount; button++) { int buttonReg; int buttonShift; bool buttonStatus; /* determine which data byte the button status is in */ buttonReg = button/4; /* bit shift to get button's status */ buttonShift = button % 8; buttonStatus = ((instanceData->buttonDataBuffer[buttonReg] >> buttonShift) & 0x01) != 0; /* if the button state changed from the last time report it and store the new state */ if (buttonStatus != instanceData->buttonDown[button]) { printk(KERN_DEBUG "%s: Button %d (code %d) -> %d.", __func__, button, instanceData->buttonMap[button], buttonStatus); /* Generate an event here. */ input_report_key(function_device->input, instanceData->buttonMap[button], buttonStatus); instanceData->buttonDown[button] = buttonStatus; } } input_sync(function_device->input); /* sync after groups of events */ } EXPORT_SYMBOL(FN_19_inthandler); int FN_19_config(struct rmi_function_info *rmifninfo) { int retval = 0; pr_debug("%s: RMI4 F19 config\n", __func__); /* TODO: Perform configuration. In particular, write any cached control * register values to the device. */ return retval; } EXPORT_SYMBOL(FN_19_config); /* Initialize any F19 specific params and settings - input * settings, device settings, etc. 
*/ int FN_19_init(struct rmi_function_device *function_device) { int i, retval = 0; struct f19_instance_data *instance_data = function_device->rfi->fndata; struct rmi_f19_functiondata *functiondata = rmi_sensor_get_functiondata(function_device->sensor, RMI_F19_INDEX); printk(KERN_DEBUG "%s: RMI4 F19 init\n", __func__); if (functiondata) { if (functiondata->button_map) { if (functiondata->button_map->nbuttons != instance_data->deviceInfo->buttonCount) { printk(KERN_WARNING "%s: Platformdata button map size (%d) != number of buttons on device (%d) - ignored.", __func__, functiondata->button_map->nbuttons, instance_data->deviceInfo->buttonCount); } else if (!functiondata->button_map->map) { printk(KERN_WARNING "%s: Platformdata button map is missing!", __func__); } else { for (i = 0; i < functiondata->button_map->nbuttons; i++) instance_data->buttonMap[i] = functiondata->button_map->map[i]; } } } /* Set up any input events. */ set_bit(EV_SYN, function_device->input->evbit); set_bit(EV_KEY, function_device->input->evbit); /* set bits for each button...*/ for (i = 0; i < instance_data->deviceInfo->buttonCount; i++) { set_bit(instance_data->buttonMap[i], function_device->input->keybit); } printk(KERN_DEBUG "%s: Creating sysfs files.", __func__); retval = device_create_file(&function_device->dev, &dev_attr_buttonCount); if (retval) { printk(KERN_ERR "%s: Failed to create button count.", __func__); return retval; } retval = device_create_file(&function_device->dev, &dev_attr_buttonMap); if (retval) { printk(KERN_ERR "%s: Failed to create button map.", __func__); return retval; } return 0; } EXPORT_SYMBOL(FN_19_init); static int getControlRegisters(struct rmi_function_info *rmifninfo, struct rmi_function_descriptor *fndescr) { struct f19_instance_data *instanceData; unsigned char *fn19Control = NULL; int retval = 0; /* Get the instance data - it should have been allocated and stored in detect.*/ instanceData = rmifninfo->fndata; /* Check to make sure instanceData is really 
there before using.*/ if (!instanceData) { printk(KERN_ERR "%s: Error - instance data not initialized yet when getting fn19 control registers.\n", __func__); return -EINVAL; } /* Allocate memory for the control registers. */ instanceData->controlRegisters = kzalloc(sizeof(struct rmi_F19_control), GFP_KERNEL); if (!instanceData->controlRegisters) { printk(KERN_ERR "%s: Error allocating F19 control registers.\n", __func__); return -ENOMEM; } instanceData->fn19regCountForBitPerButton = (instanceData->deviceInfo->buttonCount + 7)/8; /* Need to compute the amount of data to read since it varies with the * number of buttons */ instanceData->fn19ControlRegisterSize = 1 /* 1 for filter mode and button usage bits */ + 2*instanceData->fn19regCountForBitPerButton /* interrupt enable bits and single button participation bits */ + 2*instanceData->deviceInfo->buttonCount /* sensormap registers + single button sensitivity registers */ + 2; /* 1 for global sensitivity adjust + 1 for global hysteresis threshold */ /* Allocate a temp memory buffer to read the control registers into */ fn19Control = kzalloc(instanceData->fn19ControlRegisterSize, GFP_KERNEL); if (!fn19Control) { printk(KERN_ERR "%s: Error allocating temp storage to read fn19 control info.\n", __func__); return -ENOMEM; } /* Grab a copy of the control registers. 
*/ retval = rmi_read_multiple(rmifninfo->sensor, fndescr->controlBaseAddr, fn19Control, instanceData->fn19ControlRegisterSize); if (retval) { printk(KERN_ERR "%s: Failed to read F19 control registers.", __func__); return retval; } /* Copy over control registers data to the instance data */ instanceData->fn19btnUsageandfilterModeOffset = 0; instanceData->controlRegisters->buttonUsage = fn19Control[instanceData->fn19btnUsageandfilterModeOffset] & 0x3; instanceData->controlRegisters->filterMode = fn19Control[instanceData->fn19btnUsageandfilterModeOffset] & 0xc; /* Fill in interrupt enable registers */ instanceData->fn19intEnableOffset = 1; instanceData->fn19intEnableLen = instanceData->fn19regCountForBitPerButton; instanceData->controlRegisters->intEnableRegisters = kzalloc(instanceData->fn19intEnableLen, GFP_KERNEL); if (!instanceData->controlRegisters->intEnableRegisters) { printk(KERN_ERR "%s: Error allocating storage for interrupt enable control info.\n", __func__); return -ENOMEM; } memcpy(instanceData->controlRegisters->intEnableRegisters, &fn19Control[instanceData->fn19intEnableOffset], instanceData->fn19intEnableLen); /* Fill in single button control registers */ instanceData->fn19singleBtnCtrlOffset = instanceData->fn19intEnableOffset + instanceData->fn19intEnableLen; instanceData->fn19singleBtnCtrlLen = instanceData->fn19regCountForBitPerButton; instanceData->controlRegisters->singleButtonControl = kzalloc(instanceData->fn19singleBtnCtrlLen, GFP_KERNEL); if (!instanceData->controlRegisters->singleButtonControl) { printk(KERN_ERR "%s: Error allocating storage for single button participation control info.\n", __func__); return -ENOMEM; } memcpy(instanceData->controlRegisters->singleButtonControl, &fn19Control[instanceData->fn19singleBtnCtrlOffset], instanceData->fn19singleBtnCtrlLen); /* Fill in sensor map registers */ instanceData->fn19sensorMapCtrlOffset = instanceData->fn19singleBtnCtrlOffset + instanceData->fn19singleBtnCtrlLen; 
instanceData->fn19sensorMapCtrlLen = instanceData->deviceInfo->buttonCount; instanceData->controlRegisters->sensorMap = kzalloc(instanceData->fn19sensorMapCtrlLen, GFP_KERNEL); if (!instanceData->controlRegisters->sensorMap) { printk(KERN_ERR "%s: Error allocating storage for sensor map control info.\n", __func__); return -ENOMEM; } memcpy(instanceData->controlRegisters->sensorMap, &fn19Control[instanceData->fn19sensorMapCtrlOffset], instanceData->fn19sensorMapCtrlLen); /* Fill in single button sensitivity registers */ instanceData->fn19singleBtnSensOffset = instanceData->fn19sensorMapCtrlOffset + instanceData->fn19sensorMapCtrlLen; instanceData->fn19singleBtnSensLen = instanceData->deviceInfo->buttonCount; instanceData->controlRegisters->singleButtonSensitivity = kzalloc(instanceData->fn19singleBtnSensLen, GFP_KERNEL); if (!instanceData->controlRegisters->intEnableRegisters) { printk(KERN_ERR "%s: Error allocating storage for single button sensitivity control info.\n", __func__); return -ENOMEM; } memcpy(instanceData->controlRegisters->singleButtonSensitivity, &fn19Control[instanceData->fn19singleBtnSensOffset], instanceData->fn19singleBtnSensLen); /* Fill in global sensitivity adjustment and global hysteresis threshold values */ instanceData->fn19globalSensOffset = instanceData->fn19singleBtnSensOffset + instanceData->fn19singleBtnSensLen; instanceData->fn19globalHystThreshOffset = instanceData->fn19globalSensOffset + 1; instanceData->controlRegisters->globalSensitivityAdjustment = fn19Control[instanceData->fn19globalSensOffset] & 0x1f; instanceData->controlRegisters->globalHysteresisThreshold = fn19Control[instanceData->fn19globalHystThreshOffset] & 0x0f; /* Free up temp storage that held copy of control registers */ kfree(fn19Control); return 0; } int FN_19_detect(struct rmi_function_info *rmifninfo, struct rmi_function_descriptor *fndescr, unsigned int interruptCount) { unsigned char fn19queries[2]; int retval = 0; int i; struct f19_instance_data 
*instanceData; int fn19InterruptOffset; printk(KERN_DEBUG "%s: RMI4 F19 detect\n", __func__); instanceData = kzalloc(sizeof(struct f19_instance_data), GFP_KERNEL); if (!instanceData) { printk(KERN_ERR "%s: Error allocating F19 instance data.\n", __func__); return -ENOMEM; } instanceData->deviceInfo = kzalloc(sizeof(struct rmi_F19_query), GFP_KERNEL); if (!instanceData->deviceInfo) { printk(KERN_ERR "%s: Error allocating F19 device query.\n", __func__); return -ENOMEM; } rmifninfo->fndata = instanceData; /* Store addresses - used elsewhere to read data, * control, query, etc. */ rmifninfo->funcDescriptor.queryBaseAddr = fndescr->queryBaseAddr; rmifninfo->funcDescriptor.commandBaseAddr = fndescr->commandBaseAddr; rmifninfo->funcDescriptor.controlBaseAddr = fndescr->controlBaseAddr; rmifninfo->funcDescriptor.dataBaseAddr = fndescr->dataBaseAddr; rmifninfo->funcDescriptor.interruptSrcCnt = fndescr->interruptSrcCnt; rmifninfo->funcDescriptor.functionNum = fndescr->functionNum; rmifninfo->numSources = fndescr->interruptSrcCnt; /* need to get number of fingers supported, data size, etc. - to be used when getting data since the number of registers to read depends on the number of fingers supported and data size. */ retval = rmi_read_multiple(rmifninfo->sensor, fndescr->queryBaseAddr, fn19queries, sizeof(fn19queries)); if (retval) { printk(KERN_ERR "%s: RMI4 F19 detect: " "Could not read function query registers 0x%x\n", __func__, rmifninfo->funcDescriptor.queryBaseAddr); return retval; } /* Extract device data. */ instanceData->deviceInfo->configurable = fn19queries[0] & 0x01; instanceData->deviceInfo->hasSensitivityAdjust = fn19queries[0] & 0x02; instanceData->deviceInfo->hasHysteresisThreshold = fn19queries[0] & 0x04; instanceData->deviceInfo->buttonCount = fn19queries[1] & 0x01F; printk(KERN_DEBUG "%s: F19 device - %d buttons...", __func__, instanceData->deviceInfo->buttonCount); /* Need to get interrupt info to be used later when handling interrupts. 
*/ rmifninfo->interruptRegister = interruptCount/8; /* loop through interrupts for each source in fn $11 and or in a bit to the interrupt mask for each. */ fn19InterruptOffset = interruptCount % 8; for (i = fn19InterruptOffset; i < ((fndescr->interruptSrcCnt & 0x7) + fn19InterruptOffset); i++) rmifninfo->interruptMask |= 1 << i; /* Figure out just how much data we'll need to read. */ instanceData->buttonDown = kcalloc(instanceData->deviceInfo->buttonCount, sizeof(bool), GFP_KERNEL); if (!instanceData->buttonDown) { printk(KERN_ERR "%s: Error allocating F19 button state buffer.\n", __func__); return -ENOMEM; } instanceData->buttonDataBufferSize = (instanceData->deviceInfo->buttonCount + 7) / 8; instanceData->buttonDataBuffer = kcalloc(instanceData->buttonDataBufferSize, sizeof(unsigned char), GFP_KERNEL); if (!instanceData->buttonDataBuffer) { printk(KERN_ERR "%s: Failed to allocate button data buffer.", __func__); return -ENOMEM; } instanceData->buttonMap = kcalloc(instanceData->deviceInfo->buttonCount, sizeof(unsigned char), GFP_KERNEL); if (!instanceData->buttonMap) { printk(KERN_ERR "%s: Error allocating F19 button map.\n", __func__); return -ENOMEM; } for (i = 0; i < instanceData->deviceInfo->buttonCount; i++) instanceData->buttonMap[i] = BTN_0 + i; /* default values */ /* Grab the control register info. 
*/ retval = getControlRegisters(rmifninfo, fndescr); if (retval) { printk(KERN_ERR "%s: Error %d getting fn19 control register info.\n", __func__, retval); return retval; } return 0; } EXPORT_SYMBOL(FN_19_detect); static ssize_t rmi_f19_buttonCount_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rmi_function_device *fn = dev_get_drvdata(dev); struct f19_instance_data *instance_data = (struct f19_instance_data *)fn->rfi->fndata; return sprintf(buf, "%u\n", instance_data->deviceInfo->buttonCount); } static ssize_t rmi_f19_buttonCount_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { /* Not allowed. */ return -EPERM; } static ssize_t rmi_f19_buttonMap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rmi_function_device *fn = dev_get_drvdata(dev); struct f19_instance_data *instance_data = (struct f19_instance_data *)fn->rfi->fndata; int i, len, totalLen = 0; /* loop through each button map value and copy it's string representation into buf */ for (i = 0; i < instance_data->deviceInfo->buttonCount; i++) { /* get next button mapping value and write it to buf */ len = sprintf(buf, "%u ", instance_data->buttonMap[i]); /* bump up ptr to next location in buf if the sprintf was valid */ if (len > 0) { buf += len; totalLen += len; } } return totalLen; } static ssize_t rmi_f19_buttonMap_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct rmi_function_device *fn = dev_get_drvdata(dev); struct f19_instance_data *instance_data = (struct f19_instance_data *)fn->rfi->fndata; unsigned int button; int i; int retval = count; int buttonCount = 0; unsigned char *tmpButtonMap; /* Do validation on the button map data passed in. */ /* Store button mappings into a temp buffer and then verify button count and data prior to clearing out old button mappings and storing the new ones. 
*/ tmpButtonMap = kzalloc(instance_data->deviceInfo->buttonCount, GFP_KERNEL); if (!tmpButtonMap) { printk(KERN_ERR "%s: Error allocating temp button map.\n", __func__); return -ENOMEM; } for (i = 0; i < instance_data->deviceInfo->buttonCount && *buf != 0; i++) { /* get next button mapping value and store and bump up to point to next item in buf */ sscanf(buf, "%u", &button); /* Make sure the key is a valid key */ if (button > KEY_MAX) { printk(KERN_ERR "%s: Error - button map for button %d is not a valid value 0x%x.\n", __func__, i, button); retval = -EINVAL; goto err_ret; } tmpButtonMap[i] = button; buttonCount++; /* bump up buf to point to next item to read */ while (*buf != 0) { buf++; if (*(buf-1) == ' ') break; } } /* Make sure the button count matches */ if (buttonCount != instance_data->deviceInfo->buttonCount) { printk(KERN_ERR "%s: Error - button map count of %d doesn't match device button count of %d.\n" , __func__, buttonCount, instance_data->deviceInfo->buttonCount); retval = -EINVAL; goto err_ret; } /* Clear out old buttonMap data */ memset(instance_data->buttonMap, 0, buttonCount); /* Loop through the temp buffer and copy the button event and set the key bit for the new mapping. */ for (i = 0; i < buttonCount; i++) { instance_data->buttonMap[i] = tmpButtonMap[1]; set_bit(instance_data->buttonMap[i], fn->input->keybit); } err_ret: kfree(tmpButtonMap); return retval; }
gpl-2.0
dh-harald/amlogic-kernel
drivers/media/video/ths7303.c
4245
3655
/* * ths7303- THS7303 Video Amplifier driver * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed .as is. WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("TI THS7303 video amplifier driver"); MODULE_AUTHOR("Chaithrika U S"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level 0-1"); /* following function is used to set ths7303 */ static int ths7303_setvalue(struct v4l2_subdev *sd, v4l2_std_id std) { int err = 0; u8 val; struct i2c_client *client; client = v4l2_get_subdevdata(sd); if (std & (V4L2_STD_ALL & ~V4L2_STD_SECAM)) { val = 0x02; v4l2_dbg(1, debug, sd, "setting value for SDTV format\n"); } else { val = 0x00; v4l2_dbg(1, debug, sd, "disabling all channels\n"); } err |= i2c_smbus_write_byte_data(client, 0x01, val); err |= i2c_smbus_write_byte_data(client, 0x02, val); err |= i2c_smbus_write_byte_data(client, 0x03, val); if (err) v4l2_err(sd, "write failed\n"); return err; } static int ths7303_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm) { return ths7303_setvalue(sd, norm); } static int ths7303_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return 
v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_THS7303, 0); } static const struct v4l2_subdev_video_ops ths7303_video_ops = { .s_std_output = ths7303_s_std_output, }; static const struct v4l2_subdev_core_ops ths7303_core_ops = { .g_chip_ident = ths7303_g_chip_ident, }; static const struct v4l2_subdev_ops ths7303_ops = { .core = &ths7303_core_ops, .video = &ths7303_video_ops, }; static int ths7303_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct v4l2_subdev *sd; v4l2_std_id std_id = V4L2_STD_NTSC; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &ths7303_ops); return ths7303_setvalue(sd, std_id); } static int ths7303_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(sd); return 0; } static const struct i2c_device_id ths7303_id[] = { {"ths7303", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, ths7303_id); static struct i2c_driver ths7303_driver = { .driver = { .owner = THIS_MODULE, .name = "ths7303", }, .probe = ths7303_probe, .remove = ths7303_remove, .id_table = ths7303_id, }; static int __init ths7303_init(void) { return i2c_add_driver(&ths7303_driver); } static void __exit ths7303_exit(void) { i2c_del_driver(&ths7303_driver); } module_init(ths7303_init); module_exit(ths7303_exit);
gpl-2.0
timmytim/cm_kernel_jflteatt
arch/powerpc/platforms/powernv/setup.c
4757
4219
/*
 * PowerNV setup code.
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/bug.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
#include <asm/rtas.h>
#include <asm/opal.h>

#include "powernv.h"

/* Arch setup: bring up SMP and PCI, hook OPAL NVRAM when the firmware
 * provides it, and enable the NAP power-save mode. */
static void __init pnv_setup_arch(void)
{
	/* Initialize SMP */
	pnv_smp_init();

	/* Setup PCI */
	pnv_pci_init();

	/* Setup RTC and NVRAM callbacks */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		opal_nvram_init();

	/* Enable NAP mode */
	powersave_nap = 1;

	/* XXX PMCS */
}

/* Early init: pick the boot console - OPAL hvc when available,
 * otherwise fall back to a generic hvc preferred console. */
static void __init pnv_init_early(void)
{
#ifdef CONFIG_HVC_OPAL
	if (firmware_has_feature(FW_FEATURE_OPAL))
		hvc_opal_init_early();
	else
#endif
		add_preferred_console("hvc", 0, NULL);
}

/* Interrupt init: XICS is the only supported interrupt controller here;
 * warn if it did not install a get_irq handler. */
static void __init pnv_init_IRQ(void)
{
	xics_init();

	WARN_ON(!ppc_md.get_irq);
}

/* /proc/cpuinfo "machine"/"firmware" lines: model from the device tree
 * root, firmware flavour from the detected OPAL feature bits. */
static void pnv_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: PowerNV %s\n", model);
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		seq_printf(m, "firmware\t: OPAL v2\n");
	else if (firmware_has_feature(FW_FEATURE_OPAL))
		seq_printf(m, "firmware\t: OPAL v1\n");
	else
		seq_printf(m, "firmware\t: BML\n");
	of_node_put(root);
}

/* Reboot via OPAL. Retries while the firmware reports busy, polling
 * events when asked to; never returns - spins polling after the call. */
static void __noreturn pnv_restart(char *cmd)
{
	long rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_cec_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
	for (;;)
		opal_poll_events(NULL);
}

/* Power off via OPAL; same busy-retry pattern as pnv_restart(). */
static void __noreturn pnv_power_off(void)
{
	long rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_cec_power_down(0);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
	for (;;)
		opal_poll_events(NULL);
}

/* Halt is implemented as a full power-off on this platform. */
static void __noreturn pnv_halt(void)
{
	pnv_power_off();
}

/* Boot progress callback - intentionally a no-op on PowerNV. */
static void pnv_progress(char *s, unsigned short hex)
{
}

#ifdef CONFIG_KEXEC
/* Per-CPU teardown before kexec: detach the CPU from XICS. */
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
	xics_kexec_teardown_cpu(secondary);
}
#endif /* CONFIG_KEXEC */

/* Install the OPAL-backed machdep callbacks (RTC, restart/power-off/halt,
 * machine check). Used when OPAL firmware is present. */
static void __init pnv_setup_machdep_opal(void)
{
	ppc_md.get_boot_time = opal_get_boot_time;
	ppc_md.get_rtc_time = opal_get_rtc_time;
	ppc_md.set_rtc_time = opal_set_rtc_time;
	ppc_md.restart = pnv_restart;
	ppc_md.power_off = pnv_power_off;
	ppc_md.halt = pnv_halt;
	ppc_md.machine_check_exception = opal_machine_check;
}

#ifdef CONFIG_PPC_POWERNV_RTAS
/* RTAS fallback machdep callbacks; RTC hooks only when the RTAS
 * "get-time-of-day" token exists. */
static void __init pnv_setup_machdep_rtas(void)
{
	if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
		ppc_md.get_boot_time = rtas_get_boot_time;
		ppc_md.get_rtc_time = rtas_get_rtc_time;
		ppc_md.set_rtc_time = rtas_set_rtc_time;
	}
	ppc_md.restart = rtas_restart;
	ppc_md.power_off = rtas_power_off;
	ppc_md.halt = rtas_halt;
}
#endif /* CONFIG_PPC_POWERNV_RTAS */

/* Platform probe: match "ibm,powernv" in the flat device tree, set up
 * native hash-table MMU, then pick OPAL or RTAS machdep callbacks. */
static int __init pnv_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "ibm,powernv"))
		return 0;

	hpte_init_native();

	if (firmware_has_feature(FW_FEATURE_OPAL))
		pnv_setup_machdep_opal();
#ifdef CONFIG_PPC_POWERNV_RTAS
	else if (rtas.base)
		pnv_setup_machdep_rtas();
#endif /* CONFIG_PPC_POWERNV_RTAS */

	pr_debug("PowerNV detected !\n");

	return 1;
}

define_machine(powernv) {
	.name			= "PowerNV",
	.probe			= pnv_probe,
	.init_early		= pnv_init_early,
	.setup_arch		= pnv_setup_arch,
	.init_IRQ		= pnv_init_IRQ,
	.show_cpuinfo		= pnv_show_cpuinfo,
	.progress		= pnv_progress,
	.power_save		= power7_idle,
	.calibrate_decr		= generic_calibrate_decr,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down		= pnv_kexec_cpu_down,
#endif
};
gpl-2.0
JB1tz/kernel-msm
arch/mips/jz4740/timer.c
7573
1470
/*
 *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
 *  JZ4740 platform timer support
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include "timer.h"

#include <asm/mach-jz4740/base.h>

/* Ioremapped base of the JZ4740 timer/counter unit (TCU); set at init. */
void __iomem *jz4740_timer_base;

/*
 * Un-gate the watchdog counter clock by writing its bit (bit 16) to the
 * TCU stop-clear register.
 */
void jz4740_timer_enable_watchdog(void)
{
	writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR);
}
EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog);

/* Gate the watchdog counter clock (write bit 16 to the stop-set register). */
void jz4740_timer_disable_watchdog(void)
{
	writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET);
}
EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog);

/*
 * Map the TCU register window and put the unit into a quiet default
 * state: gate all timer clocks except those used as system timers, and
 * mask all timer interrupts (they are unmasked out of reset).
 */
void __init jz4740_timer_init(void)
{
	jz4740_timer_base = ioremap(JZ4740_TCU_BASE_ADDR, 0x100);

	if (!jz4740_timer_base)
		panic("Failed to ioremap timer registers");

	/* Disable all timer clocks except for those used as system timers */
	writel(0x000100fc, jz4740_timer_base + JZ_REG_TIMER_STOP_SET);

	/* Timer irqs are unmasked by default, mask them */
	writel(0x00ff00ff, jz4740_timer_base + JZ_REG_TIMER_MASK_SET);
}
gpl-2.0
sebirdman/m7_kernel
fs/ocfs2/cluster/quorum.c
8085
10355
/* -*- mode: c; c-basic-offset: 8; -*- * * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ /* This quorum hack is only here until we transition to some more rational * approach that is driven from userspace. Honest. No foolin'. * * Imagine two nodes lose network connectivity to each other but they're still * up and operating in every other way. Presumably a network timeout indicates * that a node is broken and should be recovered. They can't both recover each * other and both carry on without serialising their access to the file system. * They need to decide who is authoritative. Now extend that problem to * arbitrary groups of nodes losing connectivity between each other. * * So we declare that a node which has given up on connecting to a majority * of nodes who are still heartbeating will fence itself. * * There are huge opportunities for races here. After we give up on a node's * connection we need to wait long enough to give heartbeat an opportunity * to declare the node as truly dead. We also need to be careful with the * race between when we see a node start heartbeating and when we connect * to it. * * So nodes that are in this transtion put a hold on the quorum decision * with a counter. 
As they fall out of this transition they drop the count * and if they're the last, they fire off the decision. */ #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/reboot.h> #include "heartbeat.h" #include "nodemanager.h" #define MLOG_MASK_PREFIX ML_QUORUM #include "masklog.h" #include "quorum.h" static struct o2quo_state { spinlock_t qs_lock; struct work_struct qs_work; int qs_pending; int qs_heartbeating; unsigned long qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; int qs_connected; unsigned long qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; int qs_holds; unsigned long qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; } o2quo_state; /* this is horribly heavy-handed. It should instead flip the file * system RO and call some userspace script. */ static void o2quo_fence_self(void) { /* panic spins with interrupts enabled. with preempt * threads can still schedule, etc, etc */ o2hb_stop_all_regions(); switch (o2nm_single_cluster->cl_fence_method) { case O2NM_FENCE_PANIC: panic("*** ocfs2 is very sorry to be fencing this system by " "panicing ***\n"); break; default: WARN_ON(o2nm_single_cluster->cl_fence_method >= O2NM_FENCE_METHODS); case O2NM_FENCE_RESET: printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this " "system by restarting ***\n"); emergency_restart(); break; }; } /* Indicate that a timeout occurred on a hearbeat region write. The * other nodes in the cluster may consider us dead at that time so we * want to "fence" ourselves so that we don't scribble on the disk * after they think they've recovered us. This can't solve all * problems related to writeout after recovery but this hack can at * least close some of those gaps. When we have real fencing, this can * go away as our node would be fenced externally before other nodes * begin recovery. 
*/ void o2quo_disk_timeout(void) { o2quo_fence_self(); } static void o2quo_make_decision(struct work_struct *work) { int quorum; int lowest_hb, lowest_reachable = 0, fence = 0; struct o2quo_state *qs = &o2quo_state; spin_lock(&qs->qs_lock); lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES); if (lowest_hb != O2NM_MAX_NODES) lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm); mlog(0, "heartbeating: %d, connected: %d, " "lowest: %d (%sreachable)\n", qs->qs_heartbeating, qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un"); if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) || qs->qs_heartbeating == 1) goto out; if (qs->qs_heartbeating & 1) { /* the odd numbered cluster case is straight forward -- * if we can't talk to the majority we're hosed */ quorum = (qs->qs_heartbeating + 1)/2; if (qs->qs_connected < quorum) { mlog(ML_ERROR, "fencing this node because it is " "only connected to %u nodes and %u is needed " "to make a quorum out of %u heartbeating nodes\n", qs->qs_connected, quorum, qs->qs_heartbeating); fence = 1; } } else { /* the even numbered cluster adds the possibility of each half * of the cluster being able to talk amongst themselves.. 
in * that case we're hosed if we can't talk to the group that has * the lowest numbered node */ quorum = qs->qs_heartbeating / 2; if (qs->qs_connected < quorum) { mlog(ML_ERROR, "fencing this node because it is " "only connected to %u nodes and %u is needed " "to make a quorum out of %u heartbeating nodes\n", qs->qs_connected, quorum, qs->qs_heartbeating); fence = 1; } else if ((qs->qs_connected == quorum) && !lowest_reachable) { mlog(ML_ERROR, "fencing this node because it is " "connected to a half-quorum of %u out of %u " "nodes which doesn't include the lowest active " "node %u\n", quorum, qs->qs_heartbeating, lowest_hb); fence = 1; } } out: spin_unlock(&qs->qs_lock); if (fence) o2quo_fence_self(); } static void o2quo_set_hold(struct o2quo_state *qs, u8 node) { assert_spin_locked(&qs->qs_lock); if (!test_and_set_bit(node, qs->qs_hold_bm)) { qs->qs_holds++; mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES, "node %u\n", node); mlog(0, "node %u, %d total\n", node, qs->qs_holds); } } static void o2quo_clear_hold(struct o2quo_state *qs, u8 node) { assert_spin_locked(&qs->qs_lock); if (test_and_clear_bit(node, qs->qs_hold_bm)) { mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1); if (--qs->qs_holds == 0) { if (qs->qs_pending) { qs->qs_pending = 0; schedule_work(&qs->qs_work); } } mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n", node, qs->qs_holds); } } /* as a node comes up we delay the quorum decision until we know the fate of * the connection. the hold will be droped in conn_up or hb_down. it might be * perpetuated by con_err until hb_down. if we already have a conn, we might * be dropping a hold that conn_up got. 
*/ void o2quo_hb_up(u8 node) { struct o2quo_state *qs = &o2quo_state; spin_lock(&qs->qs_lock); qs->qs_heartbeating++; mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES, "node %u\n", node); mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node); set_bit(node, qs->qs_hb_bm); mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating); if (!test_bit(node, qs->qs_conn_bm)) o2quo_set_hold(qs, node); else o2quo_clear_hold(qs, node); spin_unlock(&qs->qs_lock); } /* hb going down releases any holds we might have had due to this node from * conn_up, conn_err, or hb_up */ void o2quo_hb_down(u8 node) { struct o2quo_state *qs = &o2quo_state; spin_lock(&qs->qs_lock); qs->qs_heartbeating--; mlog_bug_on_msg(qs->qs_heartbeating < 0, "node %u, %d heartbeating\n", node, qs->qs_heartbeating); mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node); clear_bit(node, qs->qs_hb_bm); mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating); o2quo_clear_hold(qs, node); spin_unlock(&qs->qs_lock); } /* this tells us that we've decided that the node is still heartbeating * even though we've lost it's conn. it must only be called after conn_err * and indicates that we must now make a quorum decision in the future, * though we might be doing so after waiting for holds to drain. Here * we'll be dropping the hold from conn_err. */ void o2quo_hb_still_up(u8 node) { struct o2quo_state *qs = &o2quo_state; spin_lock(&qs->qs_lock); mlog(0, "node %u\n", node); qs->qs_pending = 1; o2quo_clear_hold(qs, node); spin_unlock(&qs->qs_lock); } /* This is analogous to hb_up. as a node's connection comes up we delay the * quorum decision until we see it heartbeating. the hold will be droped in * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if * it's already heartbeating we we might be dropping a hold that conn_up got. 
* */ void o2quo_conn_up(u8 node) { struct o2quo_state *qs = &o2quo_state; spin_lock(&qs->qs_lock); qs->qs_connected++; mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES, "node %u\n", node); mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node); set_bit(node, qs->qs_conn_bm); mlog(0, "node %u, %d total\n", node, qs->qs_connected); if (!test_bit(node, qs->qs_hb_bm)) o2quo_set_hold(qs, node); else o2quo_clear_hold(qs, node); spin_unlock(&qs->qs_lock); } /* we've decided that we won't ever be connecting to the node again. if it's * still heartbeating we grab a hold that will delay decisions until either the * node stops heartbeating from hb_down or the caller decides that the node is * still up and calls still_up */ void o2quo_conn_err(u8 node) { struct o2quo_state *qs = &o2quo_state; spin_lock(&qs->qs_lock); if (test_bit(node, qs->qs_conn_bm)) { qs->qs_connected--; mlog_bug_on_msg(qs->qs_connected < 0, "node %u, connected %d\n", node, qs->qs_connected); clear_bit(node, qs->qs_conn_bm); } mlog(0, "node %u, %d total\n", node, qs->qs_connected); if (test_bit(node, qs->qs_hb_bm)) o2quo_set_hold(qs, node); spin_unlock(&qs->qs_lock); } void o2quo_init(void) { struct o2quo_state *qs = &o2quo_state; spin_lock_init(&qs->qs_lock); INIT_WORK(&qs->qs_work, o2quo_make_decision); } void o2quo_exit(void) { struct o2quo_state *qs = &o2quo_state; flush_work_sync(&qs->qs_work); }
gpl-2.0
jkoelker/pandaboard-kernel
drivers/media/dvb/ttpci/av7110_ir.c
8341
11292
/*
 * Driver for the remote control of SAA7146 based AV7110 cards
 *
 * Copyright (C) 1999-2003 Holger Waechtler <holger@convergence.de>
 * Copyright (C) 2003-2007 Oliver Endriss <o.endriss@gmx.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include "av7110.h"
#include "av7110_hw.h"

#define AV_CNT		4

#define IR_RC5		0
#define IR_RCMM		1
#define IR_RC5_EXT	2 /* internal only */

#define IR_ALL		0xffffffff

#define UP_TIMEOUT	(HZ*7/25)

/* Note: enable ir debugging by or'ing debug with 16 */

static int ir_protocol[AV_CNT] = { IR_RCMM, IR_RCMM, IR_RCMM, IR_RCMM};
module_param_array(ir_protocol, int, NULL, 0644);
MODULE_PARM_DESC(ir_protocol, "Infrared protocol: 0 RC5, 1 RCMM (default)");

static int ir_inversion[AV_CNT];
module_param_array(ir_inversion, int, NULL, 0644);
MODULE_PARM_DESC(ir_inversion, "Inversion of infrared signal: 0 not inverted (default), 1 inverted");

static uint ir_device_mask[AV_CNT] = { IR_ALL, IR_ALL, IR_ALL, IR_ALL };
module_param_array(ir_device_mask, uint, NULL, 0644);
MODULE_PARM_DESC(ir_device_mask, "Bitmask of infrared devices: bit 0..31 = device 0..31 (default: all)");

/* registry of all AV7110 instances this driver handles */
static int av_cnt;
static struct av7110 *av_list[AV_CNT];

/* keymap indexed by the decoded IR data byte; 0 means "no key" */
static u16 default_key_map [256] = {
	KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7,
	KEY_8, KEY_9, KEY_BACK, 0, KEY_POWER, KEY_MUTE, 0, KEY_INFO,
	KEY_VOLUMEUP, KEY_VOLUMEDOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	KEY_CHANNELUP, KEY_CHANNELDOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_TEXT, 0, 0, KEY_TV,
	0, 0, 0, 0, 0, KEY_SETUP, 0, 0, 0, 0, 0, KEY_SUBTITLE, 0, 0,
	KEY_LANGUAGE, 0,
	KEY_RADIO, 0, 0, 0, 0, KEY_EXIT, 0, 0, KEY_UP, KEY_DOWN,
	KEY_LEFT, KEY_RIGHT, KEY_OK, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	KEY_RED, KEY_GREEN, KEY_YELLOW, KEY_BLUE, 0, 0, 0, 0, 0, 0, 0,
	KEY_MENU, KEY_LIST, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, KEY_UP, KEY_UP, KEY_DOWN, KEY_DOWN,
	0, 0, 0, 0, KEY_EPG, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_VCR
};


/* key-up timer */
static void av7110_emit_keyup(unsigned long parm)
{
	struct infrared *ir = (struct infrared *) parm;

	/* only release a key the input layer still considers pressed */
	if (!ir || !test_bit(ir->last_key, ir->input_dev->key))
		return;

	input_report_key(ir->input_dev, ir->last_key, 0);
	input_sync(ir->input_dev);
}


/* tasklet: decode the raw IR word latched by the interrupt handler and
 * feed keydown/repeat/keyup events to the input layer */
static void av7110_emit_key(unsigned long parm)
{
	struct infrared *ir = (struct infrared *) parm;
	u32 ircom = ir->ir_command;
	u8 data;
	u8 addr;
	u16 toggle;
	u16 keycode;

	/* extract device address and data */
	switch (ir->protocol) {
	case IR_RC5: /* RC5: 5 bits device address, 6 bits data */
		data = ircom & 0x3f;
		addr = (ircom >> 6) & 0x1f;
		toggle = ircom & 0x0800;
		break;

	case IR_RCMM: /* RCMM: ? bits device address, ? bits data */
		data = ircom & 0xff;
		addr = (ircom >> 8) & 0x1f;
		toggle = ircom & 0x8000;
		break;

	case IR_RC5_EXT: /* extended RC5: 5 bits device address, 7 bits data */
		data = ircom & 0x3f;
		addr = (ircom >> 6) & 0x1f;
		/* invert 7th data bit for backward compatibility with RC5 keymaps */
		if (!(ircom & 0x1000))
			data |= 0x40;
		toggle = ircom & 0x0800;
		break;

	default:
		printk("%s invalid protocol %x\n", __func__, ir->protocol);
		return;
	}

	input_event(ir->input_dev, EV_MSC, MSC_RAW, (addr << 16) | data);
	input_event(ir->input_dev, EV_MSC, MSC_SCAN, data);

	keycode = ir->key_map[data];

	dprintk(16, "%s: code %08x -> addr %i data 0x%02x -> keycode %i\n",
		__func__, ircom, addr, data, keycode);

	/* check device address */
	if (!(ir->device_mask & (1 << addr)))
		return;

	if (!keycode) {
		printk ("%s: code %08x -> addr %i data 0x%02x -> unknown key!\n",
			__func__, ircom, addr, data);
		return;
	}

	if (timer_pending(&ir->keyup_timer)) {
		del_timer(&ir->keyup_timer);
		if (ir->last_key != keycode || toggle != ir->last_toggle) {
			/* a different key (or a re-press): release old,
			 * press new */
			ir->delay_timer_finished = 0;
			input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
			input_event(ir->input_dev, EV_KEY, keycode, 1);
			input_sync(ir->input_dev);
		} else if (ir->delay_timer_finished) {
			/* same key held past the repeat delay: autorepeat */
			input_event(ir->input_dev, EV_KEY, keycode, 2);
			input_sync(ir->input_dev);
		}
	} else {
		/* fresh keydown */
		ir->delay_timer_finished = 0;
		input_event(ir->input_dev, EV_KEY, keycode, 1);
		input_sync(ir->input_dev);
	}

	ir->last_key = keycode;
	ir->last_toggle = toggle;

	/* (re)arm the keyup timer */
	ir->keyup_timer.expires = jiffies + UP_TIMEOUT;
	add_timer(&ir->keyup_timer);
}


/* register with input layer: publish the current keymap's keys */
static void input_register_keys(struct infrared *ir)
{
	int i;

	set_bit(EV_KEY, ir->input_dev->evbit);
	set_bit(EV_REP, ir->input_dev->evbit);
	set_bit(EV_MSC, ir->input_dev->evbit);

	set_bit(MSC_RAW, ir->input_dev->mscbit);
	set_bit(MSC_SCAN, ir->input_dev->mscbit);

	memset(ir->input_dev->keybit, 0, sizeof(ir->input_dev->keybit));

	for (i = 0; i < ARRAY_SIZE(ir->key_map); i++) {
		if (ir->key_map[i] > KEY_MAX)
			ir->key_map[i] = 0;	/* drop out-of-range codes */
		else if (ir->key_map[i] > KEY_RESERVED)
			set_bit(ir->key_map[i], ir->input_dev->keybit);
	}

	ir->input_dev->keycode = ir->key_map;
	ir->input_dev->keycodesize = sizeof(ir->key_map[0]);
	ir->input_dev->keycodemax = ARRAY_SIZE(ir->key_map);
}


/* called by the input driver after rep[REP_DELAY] ms */
static void input_repeat_key(unsigned long parm)
{
	struct infrared *ir = (struct infrared *) parm;

	ir->delay_timer_finished = 1;
}


/* check for configuration changes; pushes the (possibly new) protocol
 * and inversion settings down to the ARM firmware when needed */
int av7110_check_ir_config(struct av7110 *av7110, int force)
{
	int i;
	int modified = force;
	int ret = -ENODEV;

	for (i = 0; i < av_cnt; i++)
		if (av7110 == av_list[i])
			break;

	if (i < av_cnt && av7110) {
		if ((av7110->ir.protocol & 1) != ir_protocol[i] ||
		    av7110->ir.inversion != ir_inversion[i])
			modified = true;

		if (modified) {
			/* protocol */
			if (ir_protocol[i]) {
				ir_protocol[i] = 1;
				av7110->ir.protocol = IR_RCMM;
				av7110->ir.ir_config = 0x0001;
			} else if (FW_VERSION(av7110->arm_app) >= 0x2620) {
				av7110->ir.protocol = IR_RC5_EXT;
				av7110->ir.ir_config = 0x0002;
			} else {
				av7110->ir.protocol = IR_RC5;
				av7110->ir.ir_config = 0x0000;
			}
			/* inversion */
			if (ir_inversion[i]) {
				ir_inversion[i] = 1;
				av7110->ir.ir_config |= 0x8000;
			}
			av7110->ir.inversion = ir_inversion[i];
			/* update ARM */
			ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1,
					    av7110->ir.ir_config);
		} else
			ret = 0;

		/* address */
		if (av7110->ir.device_mask != ir_device_mask[i])
			av7110->ir.device_mask = ir_device_mask[i];
	}

	return ret;
}


/* /proc/av7110_ir interface: userspace writes a 32-bit config word
 * followed by a complete keymap; applied to all registered cards */
static ssize_t av7110_ir_proc_write(struct file *file, const char __user *buffer,
				    size_t count, loff_t *pos)
{
	char *page;
	u32 ir_config;
	int size = sizeof ir_config + sizeof av_list[0]->ir.key_map;
	int i;

	if (count < size)
		return -EINVAL;

	page = vmalloc(size);
	if (!page)
		return -ENOMEM;

	if (copy_from_user(page, buffer, size)) {
		vfree(page);
		return -EFAULT;
	}

	memcpy(&ir_config, page, sizeof ir_config);

	for (i = 0; i < av_cnt; i++) {
		/* keymap */
		memcpy(av_list[i]->ir.key_map, page + sizeof ir_config,
			sizeof(av_list[i]->ir.key_map));
		/* protocol, inversion, address */
		ir_protocol[i] = ir_config & 0x0001;
		ir_inversion[i] = ir_config & 0x8000 ? 1 : 0;
		if (ir_config & 0x4000)
			ir_device_mask[i] = 1 << ((ir_config >> 16) & 0x1f);
		else
			ir_device_mask[i] = IR_ALL;
		/* update configuration */
		av7110_check_ir_config(av_list[i], false);
		input_register_keys(&av_list[i]->ir);
	}
	vfree(page);
	return count;
}

static const struct file_operations av7110_ir_proc_fops = {
	.owner		= THIS_MODULE,
	.write		= av7110_ir_proc_write,
	.llseek		= noop_llseek,
};

/* interrupt handler: latch the raw IR word, defer decoding to tasklet */
static void ir_handler(struct av7110 *av7110, u32 ircom)
{
	dprintk(4, "ir command = %08x\n", ircom);
	av7110->ir.ir_command = ircom;
	tasklet_schedule(&av7110->ir.ir_tasklet);
}


/* per-card setup: register the card, allocate and register the input
 * device, create the shared /proc entry on the first card */
int __devinit av7110_ir_init(struct av7110 *av7110)
{
	struct input_dev *input_dev;
	/* static: one proc entry shared by all cards */
	static struct proc_dir_entry *e;
	int err;

	if (av_cnt >= ARRAY_SIZE(av_list))
		return -ENOSPC;

	av_list[av_cnt++] = av7110;
	av7110_check_ir_config(av7110, true);

	init_timer(&av7110->ir.keyup_timer);
	av7110->ir.keyup_timer.function = av7110_emit_keyup;
	av7110->ir.keyup_timer.data = (unsigned long) &av7110->ir;

	input_dev = input_allocate_device();
	if (!input_dev)
		return -ENOMEM;

	av7110->ir.input_dev = input_dev;
	snprintf(av7110->ir.input_phys, sizeof(av7110->ir.input_phys),
		"pci-%s/ir0", pci_name(av7110->dev->pci));

	input_dev->name = "DVB on-card IR receiver";

	input_dev->phys = av7110->ir.input_phys;
	input_dev->id.bustype = BUS_PCI;
	input_dev->id.version = 2;
	if (av7110->dev->pci->subsystem_vendor) {
		input_dev->id.vendor = av7110->dev->pci->subsystem_vendor;
		input_dev->id.product = av7110->dev->pci->subsystem_device;
	} else {
		input_dev->id.vendor = av7110->dev->pci->vendor;
		input_dev->id.product = av7110->dev->pci->device;
	}
	input_dev->dev.parent = &av7110->dev->pci->dev;
	/* initial keymap */
	memcpy(av7110->ir.key_map, default_key_map, sizeof av7110->ir.key_map);
	input_register_keys(&av7110->ir);
	err = input_register_device(input_dev);
	if (err) {
		input_free_device(input_dev);
		return err;
	}
	input_dev->timer.function = input_repeat_key;
	input_dev->timer.data = (unsigned long) &av7110->ir;

	if (av_cnt == 1) {
		e = proc_create("av7110_ir", S_IWUSR, NULL, &av7110_ir_proc_fops);
		if (e)
			e->size = 4 + 256 * sizeof(u16);
	}

	tasklet_init(&av7110->ir.ir_tasklet, av7110_emit_key,
		     (unsigned long) &av7110->ir);
	av7110->ir.ir_handler = ir_handler;

	return 0;
}


/* per-card teardown: quiesce timers/tasklet, drop from the registry,
 * remove the proc entry when the last card goes away */
void __devexit av7110_ir_exit(struct av7110 *av7110)
{
	int i;

	if (av_cnt == 0)
		return;

	del_timer_sync(&av7110->ir.keyup_timer);
	av7110->ir.ir_handler = NULL;
	tasklet_kill(&av7110->ir.ir_tasklet);

	for (i = 0; i < av_cnt; i++)
		if (av_list[i] == av7110) {
			av_list[i] = av_list[av_cnt-1];
			av_list[av_cnt-1] = NULL;
			break;
		}

	if (av_cnt == 1)
		remove_proc_entry("av7110_ir", NULL);

	input_unregister_device(av7110->ir.input_dev);

	av_cnt--;
}

//MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>, Oliver Endriss <o.endriss@gmx.de>");
//MODULE_LICENSE("GPL");
gpl-2.0
bemolxd/android_kernel_x2xtreme-test
net/netfilter/nf_conntrack_broadcast.c
12437
2130
/* * broadcast connection tracking helper * * (c) 2005 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/ip.h> #include <net/route.h> #include <linux/inetdevice.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int timeout) { struct nf_conntrack_expect *exp; struct iphdr *iph = ip_hdr(skb); struct rtable *rt = skb_rtable(skb); struct in_device *in_dev; struct nf_conn_help *help = nfct_help(ct); __be32 mask = 0; /* we're only interested in locally generated packets */ if (skb->sk == NULL) goto out; if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) goto out; if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) goto out; rcu_read_lock(); in_dev = __in_dev_get_rcu(rt->dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { if (ifa->ifa_broadcast == iph->daddr) { mask = ifa->ifa_mask; break; } } endfor_ifa(in_dev); } rcu_read_unlock(); if (mask == 0) goto out; exp = nf_ct_expect_alloc(ct); if (exp == NULL) goto out; exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port; exp->mask.src.u3.ip = mask; exp->mask.src.u.udp.port = htons(0xFFFF); exp->expectfn = NULL; exp->flags = NF_CT_EXPECT_PERMANENT; exp->class = NF_CT_EXPECT_CLASS_DEFAULT; exp->helper = NULL; nf_ct_expect_related(exp); nf_ct_expect_put(exp); nf_ct_refresh(ct, skb, timeout * HZ); out: return NF_ACCEPT; } EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help); MODULE_LICENSE("GPL");
gpl-2.0
sparkma/kernel
drivers/edac/i7core_edac.c
150
56446
/* Intel i7 core/Nehalem Memory Controller kernel module * * This driver supports the memory controllers found on the Intel * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx, * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield * and Westmere-EP. * * This file may be distributed under the terms of the * GNU General Public License version 2 only. * * Copyright (c) 2009-2010 by: * Mauro Carvalho Chehab <mchehab@redhat.com> * * Red Hat Inc. http://www.redhat.com * * Forked and adapted from the i5400_edac driver * * Based on the following public Intel datasheets: * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor * Datasheet, Volume 2: * http://download.intel.com/design/processor/datashts/320835.pdf * Intel Xeon Processor 5500 Series Datasheet Volume 2 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf * also available at: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/edac.h> #include <linux/mmzone.h> #include <linux/edac_mce.h> #include <linux/smp.h> #include <asm/processor.h> #include "edac_core.h" /* Static vars */ static LIST_HEAD(i7core_edac_list); static DEFINE_MUTEX(i7core_edac_lock); static int probed; static int use_pci_fixup; module_param(use_pci_fixup, int, 0444); MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); /* * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core * registers start at bus 255, and are not reported by BIOS. * We currently find devices with only 2 sockets. In order to support more QPI * Quick Path Interconnect, just increment this number. 
*/ #define MAX_SOCKET_BUSES 2 /* * Alter this version for the module when modifications are made */ #define I7CORE_REVISION " Ver: 1.0.0 " __DATE__ #define EDAC_MOD_STR "i7core_edac" /* * Debug macros */ #define i7core_printk(level, fmt, arg...) \ edac_printk(level, "i7core", fmt, ##arg) #define i7core_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg) /* * i7core Memory Controller Registers */ /* OFFSETS for Device 0 Function 0 */ #define MC_CFG_CONTROL 0x90 /* OFFSETS for Device 3 Function 0 */ #define MC_CONTROL 0x48 #define MC_STATUS 0x4c #define MC_MAX_DOD 0x64 /* * OFFSETS for Device 3 Function 4, as inicated on Xeon 5500 datasheet: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ #define MC_TEST_ERR_RCV1 0x60 #define DIMM2_COR_ERR(r) ((r) & 0x7fff) #define MC_TEST_ERR_RCV0 0x64 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff) #define DIMM0_COR_ERR(r) ((r) & 0x7fff) /* OFFSETS for Device 3 Function 2, as inicated on Xeon 5500 datasheet */ #define MC_COR_ECC_CNT_0 0x80 #define MC_COR_ECC_CNT_1 0x84 #define MC_COR_ECC_CNT_2 0x88 #define MC_COR_ECC_CNT_3 0x8c #define MC_COR_ECC_CNT_4 0x90 #define MC_COR_ECC_CNT_5 0x94 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff) #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff) /* OFFSETS for Devices 4,5 and 6 Function 0 */ #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58 #define THREE_DIMMS_PRESENT (1 << 24) #define SINGLE_QUAD_RANK_PRESENT (1 << 23) #define QUAD_RANK_PRESENT (1 << 22) #define REGISTERED_DIMM (1 << 15) #define MC_CHANNEL_MAPPER 0x60 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1) #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1) #define MC_CHANNEL_RANK_PRESENT 0x7c #define RANK_PRESENT_MASK 0xffff #define MC_CHANNEL_ADDR_MATCH 0xf0 #define MC_CHANNEL_ERROR_MASK 0xf8 #define MC_CHANNEL_ERROR_INJECT 0xfc #define INJECT_ADDR_PARITY 0x10 #define INJECT_ECC 0x08 #define MASK_CACHELINE 0x06 #define MASK_FULL_CACHELINE 0x06 #define 
MASK_MSB32_CACHELINE 0x04 #define MASK_LSB32_CACHELINE 0x02 #define NO_MASK_CACHELINE 0x00 #define REPEAT_EN 0x01 /* OFFSETS for Devices 4,5 and 6 Function 1 */ #define MC_DOD_CH_DIMM0 0x48 #define MC_DOD_CH_DIMM1 0x4c #define MC_DOD_CH_DIMM2 0x50 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10)) #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10) #define DIMM_PRESENT_MASK (1 << 9) #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9) #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7)) #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7) #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5)) #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5) #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2)) #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2) #define MC_DOD_NUMCOL_MASK 3 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK) #define MC_RANK_PRESENT 0x7c #define MC_SAG_CH_0 0x80 #define MC_SAG_CH_1 0x84 #define MC_SAG_CH_2 0x88 #define MC_SAG_CH_3 0x8c #define MC_SAG_CH_4 0x90 #define MC_SAG_CH_5 0x94 #define MC_SAG_CH_6 0x98 #define MC_SAG_CH_7 0x9c #define MC_RIR_LIMIT_CH_0 0x40 #define MC_RIR_LIMIT_CH_1 0x44 #define MC_RIR_LIMIT_CH_2 0x48 #define MC_RIR_LIMIT_CH_3 0x4C #define MC_RIR_LIMIT_CH_4 0x50 #define MC_RIR_LIMIT_CH_5 0x54 #define MC_RIR_LIMIT_CH_6 0x58 #define MC_RIR_LIMIT_CH_7 0x5C #define MC_RIR_LIMIT_MASK ((1 << 10) - 1) #define MC_RIR_WAY_CH 0x80 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7) #define MC_RIR_WAY_RANK_MASK 0x7 /* * i7core structs */ #define NUM_CHANS 3 #define MAX_DIMMS 3 /* Max DIMMS per channel */ #define MAX_MCR_FUNC 4 #define MAX_CHAN_FUNC 3 struct i7core_info { u32 mc_control; u32 mc_status; u32 max_dod; u32 ch_map; }; struct i7core_inject { int enable; u32 section; u32 type; u32 eccmask; /* Error address mask */ int channel, dimm, rank, bank, page, col; }; struct i7core_channel { u32 ranks; u32 dimms; }; struct pci_id_descr { int dev; int func; int dev_id; int optional; }; struct 
pci_id_table { const struct pci_id_descr *descr; int n_devs; }; struct i7core_dev { struct list_head list; u8 socket; struct pci_dev **pdev; int n_devs; struct mem_ctl_info *mci; }; struct i7core_pvt { struct pci_dev *pci_noncore; struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; struct i7core_dev *i7core_dev; struct i7core_info info; struct i7core_inject inject; struct i7core_channel channel[NUM_CHANS]; int ce_count_available; int csrow_map[NUM_CHANS][MAX_DIMMS]; /* ECC corrected errors counts per udimm */ unsigned long udimm_ce_count[MAX_DIMMS]; int udimm_last_ce_count[MAX_DIMMS]; /* ECC corrected errors counts per rdimm */ unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS]; int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS]; unsigned int is_registered; /* mcelog glue */ struct edac_mce edac_mce; /* Fifo double buffers */ struct mce mce_entry[MCE_LOG_LEN]; struct mce mce_outentry[MCE_LOG_LEN]; /* Fifo in/out counters */ unsigned mce_in, mce_out; /* Count indicator to show errors not got */ unsigned mce_overrun; /* Struct to control EDAC polling */ struct edac_pci_ctl_info *i7core_pci; }; #define PCI_DESCR(device, function, device_id) \ .dev = (device), \ .func = (function), \ .dev_id = (device_id) static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { /* Memory controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, /* Exists only for RDIMM */ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 }, { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, /* Channel 0 */ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) }, { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) }, { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) }, { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) }, /* Channel 1 */ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) }, { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) }, { PCI_DESCR(5, 2, 
PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) }, { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) }, /* Channel 2 */ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) }, { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, }; static const struct pci_id_descr pci_dev_descr_lynnfield[] = { { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) }, { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) }, { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) }, { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) }, { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) }, { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) }, { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) }, { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) }, { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, }; static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = { /* Memory controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) }, /* Exists only for RDIMM */ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 }, { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) }, /* Channel 0 */ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) }, { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) }, { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) }, { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) }, /* Channel 1 */ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) }, { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) }, { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) 
}, { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) }, /* Channel 2 */ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) }, { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, }; #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } static const struct pci_id_table pci_dev_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem), PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield), PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere), {0,} /* 0 terminated list. */ }; /* * pci_device_id table for which devices we are looking for */ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, {0,} /* 0 terminated list. */ }; /**************************************************************************** Anciliary status routines ****************************************************************************/ /* MC_CONTROL bits */ #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch))) #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1)) /* MC_STATUS bits */ #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4)) #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch)) /* MC_MAX_DOD read functions */ static inline int numdimms(u32 dimms) { return (dimms & 0x3) + 1; } static inline int numrank(u32 rank) { static int ranks[4] = { 1, 2, 4, -EINVAL }; return ranks[rank & 0x3]; } static inline int numbank(u32 bank) { static int banks[4] = { 4, 8, 16, -EINVAL }; return banks[bank & 0x3]; } static inline int numrow(u32 row) { static int rows[8] = { 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, -EINVAL, -EINVAL, -EINVAL, }; return rows[row & 0x7]; } static inline int numcol(u32 col) { static int cols[8] = { 1 << 10, 
1 << 11, 1 << 12, -EINVAL, }; return cols[col & 0x3]; } static struct i7core_dev *get_i7core_dev(u8 socket) { struct i7core_dev *i7core_dev; list_for_each_entry(i7core_dev, &i7core_edac_list, list) { if (i7core_dev->socket == socket) return i7core_dev; } return NULL; } static struct i7core_dev *alloc_i7core_dev(u8 socket, const struct pci_id_table *table) { struct i7core_dev *i7core_dev; i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL); if (!i7core_dev) return NULL; i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs, GFP_KERNEL); if (!i7core_dev->pdev) { kfree(i7core_dev); return NULL; } i7core_dev->socket = socket; i7core_dev->n_devs = table->n_devs; list_add_tail(&i7core_dev->list, &i7core_edac_list); return i7core_dev; } static void free_i7core_dev(struct i7core_dev *i7core_dev) { list_del(&i7core_dev->list); kfree(i7core_dev->pdev); kfree(i7core_dev); } /**************************************************************************** Memory check routines ****************************************************************************/ static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot, unsigned func) { struct i7core_dev *i7core_dev = get_i7core_dev(socket); int i; if (!i7core_dev) return NULL; for (i = 0; i < i7core_dev->n_devs; i++) { if (!i7core_dev->pdev[i]) continue; if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot && PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) { return i7core_dev->pdev[i]; } } return NULL; } /** * i7core_get_active_channels() - gets the number of channels and csrows * @socket: Quick Path Interconnect socket * @channels: Number of channels that will be returned * @csrows: Number of csrows found * * Since EDAC core needs to know in advance the number of available channels * and csrows, in order to allocate memory for csrows/channels, it is needed * to run two similar steps. At the first step, implemented on this function, * it checks the number of csrows/channels present at one socket. 
* this is used in order to properly allocate the size of mci components. * * It should be noticed that none of the current available datasheets explain * or even mention how csrows are seen by the memory controller. So, we need * to add a fake description for csrows. * So, this driver is attributing one DIMM memory for one csrow. */ static int i7core_get_active_channels(const u8 socket, unsigned *channels, unsigned *csrows) { struct pci_dev *pdev = NULL; int i, j; u32 status, control; *channels = 0; *csrows = 0; pdev = get_pdev_slot_func(socket, 3, 0); if (!pdev) { i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n", socket); return -ENODEV; } /* Device 3 function 0 reads */ pci_read_config_dword(pdev, MC_STATUS, &status); pci_read_config_dword(pdev, MC_CONTROL, &control); for (i = 0; i < NUM_CHANS; i++) { u32 dimm_dod[3]; /* Check if the channel is active */ if (!(control & (1 << (8 + i)))) continue; /* Check if the channel is disabled */ if (status & (1 << i)) continue; pdev = get_pdev_slot_func(socket, i + 4, 1); if (!pdev) { i7core_printk(KERN_ERR, "Couldn't find socket %d " "fn %d.%d!!!\n", socket, i + 4, 1); return -ENODEV; } /* Devices 4-6 function 1 */ pci_read_config_dword(pdev, MC_DOD_CH_DIMM0, &dimm_dod[0]); pci_read_config_dword(pdev, MC_DOD_CH_DIMM1, &dimm_dod[1]); pci_read_config_dword(pdev, MC_DOD_CH_DIMM2, &dimm_dod[2]); (*channels)++; for (j = 0; j < 3; j++) { if (!DIMM_PRESENT(dimm_dod[j])) continue; (*csrows)++; } } debugf0("Number of active channels on socket %d: %d\n", socket, *channels); return 0; } static int get_dimm_config(const struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; struct csrow_info *csr; struct pci_dev *pdev; int i, j; int csrow = 0; unsigned long last_page = 0; enum edac_type mode; enum mem_type mtype; /* Get data from the MC register, function 0 */ pdev = pvt->pci_mcr[0]; if (!pdev) return -ENODEV; /* Device 3 function 0 reads */ pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control); 
pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status); pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map); if (ECC_ENABLED(pvt)) { debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); if (ECCx8(pvt)) mode = EDAC_S8ECD8ED; else mode = EDAC_S4ECD4ED; } else { debugf0("ECC disabled\n"); mode = EDAC_NONE; } /* FIXME: need to handle the error codes */ debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked " "x%x x 0x%x\n", numdimms(pvt->info.max_dod), numrank(pvt->info.max_dod >> 2), numbank(pvt->info.max_dod >> 4), numrow(pvt->info.max_dod >> 6), numcol(pvt->info.max_dod >> 9)); for (i = 0; i < NUM_CHANS; i++) { u32 data, dimm_dod[3], value[8]; if (!pvt->pci_ch[i][0]) continue; if (!CH_ACTIVE(pvt, i)) { debugf0("Channel %i is not active\n", i); continue; } if (CH_DISABLED(pvt, i)) { debugf0("Channel %i is disabled\n", i); continue; } /* Devices 4-6 function 0 */ pci_read_config_dword(pvt->pci_ch[i][0], MC_CHANNEL_DIMM_INIT_PARAMS, &data); pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ? 4 : 2; if (data & REGISTERED_DIMM) mtype = MEM_RDDR3; else mtype = MEM_DDR3; #if 0 if (data & THREE_DIMMS_PRESENT) pvt->channel[i].dimms = 3; else if (data & SINGLE_QUAD_RANK_PRESENT) pvt->channel[i].dimms = 1; else pvt->channel[i].dimms = 2; #endif /* Devices 4-6 function 1 */ pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM0, &dimm_dod[0]); pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM1, &dimm_dod[1]); pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM2, &dimm_dod[2]); debugf0("Ch%d phy rd%d, wr%d (0x%08x): " "%d ranks, %cDIMMs\n", i, RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), data, pvt->channel[i].ranks, (data & REGISTERED_DIMM) ? 
'R' : 'U'); for (j = 0; j < 3; j++) { u32 banks, ranks, rows, cols; u32 size, npages; if (!DIMM_PRESENT(dimm_dod[j])) continue; banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); cols = numcol(MC_DOD_NUMCOL(dimm_dod[j])); /* DDR3 has 8 I/O banks */ size = (rows * cols * banks * ranks) >> (20 - 3); pvt->channel[i].dimms++; debugf0("\tdimm %d %d Mb offset: %x, " "bank: %d, rank: %d, row: %#x, col: %#x\n", j, size, RANKOFFSET(dimm_dod[j]), banks, ranks, rows, cols); npages = MiB_TO_PAGES(size); csr = &mci->csrows[csrow]; csr->first_page = last_page + 1; last_page += npages; csr->last_page = last_page; csr->nr_pages = npages; csr->page_mask = 0; csr->grain = 8; csr->csrow_idx = csrow; csr->nr_channels = 1; csr->channels[0].chan_idx = i; csr->channels[0].ce_count = 0; pvt->csrow_map[i][j] = csrow; switch (banks) { case 4: csr->dtype = DEV_X4; break; case 8: csr->dtype = DEV_X8; break; case 16: csr->dtype = DEV_X16; break; default: csr->dtype = DEV_UNKNOWN; } csr->edac_mode = mode; csr->mtype = mtype; csrow++; } pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]); pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]); pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]); pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]); pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); for (j = 0; j < 8; j++) debugf1("\t\t%#x\t%#x\t%#x\n", (value[j] >> 27) & 0x1, (value[j] >> 24) & 0x7, (value[j] && ((1 << 24) - 1))); } return 0; } /**************************************************************************** Error insertion routines ****************************************************************************/ /* The i7core has independent error injection features per channel. 
However, to have a simpler code, we don't allow enabling error injection on more than one channel. Also, since a change at an inject parameter will be applied only at enable, we're disabling error injection on all write calls to the sysfs nodes that controls the error code injection. */ static int disable_inject(const struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; pvt->inject.enable = 0; if (!pvt->pci_ch[pvt->inject.channel][0]) return -ENODEV; pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, 0); return 0; } /* * i7core inject inject.section * * accept and store error injection inject.section value * bit 0 - refers to the lower 32-byte half cacheline * bit 1 - refers to the upper 32-byte half cacheline */ static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = strict_strtoul(data, 10, &value); if ((rc < 0) || (value > 3)) return -EIO; pvt->inject.section = (u32) value; return count; } static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.section); } /* * i7core inject.type * * accept and store error injection inject.section value * bit 0 - repeat enable - Enable error repetition * bit 1 - inject ECC error * bit 2 - inject parity error */ static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = strict_strtoul(data, 10, &value); if ((rc < 0) || (value > 7)) return -EIO; pvt->inject.type = (u32) value; return count; } static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", 
pvt->inject.type); } /* * i7core_inject_inject.eccmask_store * * The type of error (UE/CE) will depend on the inject.eccmask value: * Any bits set to a 1 will flip the corresponding ECC bit * Correctable errors can be injected by flipping 1 bit or the bits within * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an * uncorrectable error to be injected. */ static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = strict_strtoul(data, 10, &value); if (rc < 0) return -EIO; pvt->inject.eccmask = (u32) value; return count; } static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.eccmask); } /* * i7core_addrmatch * * The type of error (UE/CE) will depend on the inject.eccmask value: * Any bits set to a 1 will flip the corresponding ECC bit * Correctable errors can be injected by flipping 1 bit or the bits within * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an * uncorrectable error to be injected. 
*/ #define DECLARE_ADDR_MATCH(param, limit) \ static ssize_t i7core_inject_store_##param( \ struct mem_ctl_info *mci, \ const char *data, size_t count) \ { \ struct i7core_pvt *pvt; \ long value; \ int rc; \ \ debugf1("%s()\n", __func__); \ pvt = mci->pvt_info; \ \ if (pvt->inject.enable) \ disable_inject(mci); \ \ if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\ value = -1; \ else { \ rc = strict_strtoul(data, 10, &value); \ if ((rc < 0) || (value >= limit)) \ return -EIO; \ } \ \ pvt->inject.param = value; \ \ return count; \ } \ \ static ssize_t i7core_inject_show_##param( \ struct mem_ctl_info *mci, \ char *data) \ { \ struct i7core_pvt *pvt; \ \ pvt = mci->pvt_info; \ debugf1("%s() pvt=%p\n", __func__, pvt); \ if (pvt->inject.param < 0) \ return sprintf(data, "any\n"); \ else \ return sprintf(data, "%d\n", pvt->inject.param);\ } #define ATTR_ADDR_MATCH(param) \ { \ .attr = { \ .name = #param, \ .mode = (S_IRUGO | S_IWUSR) \ }, \ .show = i7core_inject_show_##param, \ .store = i7core_inject_store_##param, \ } DECLARE_ADDR_MATCH(channel, 3); DECLARE_ADDR_MATCH(dimm, 3); DECLARE_ADDR_MATCH(rank, 4); DECLARE_ADDR_MATCH(bank, 32); DECLARE_ADDR_MATCH(page, 0x10000); DECLARE_ADDR_MATCH(col, 0x4000); static int write_and_test(struct pci_dev *dev, const int where, const u32 val) { u32 read; int count; debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), where, val); for (count = 0; count < 10; count++) { if (count) msleep(100); pci_write_config_dword(dev, where, val); pci_read_config_dword(dev, where, &read); if (read == val) return 0; } i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x " "write=%08x. Read=%08x\n", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), where, val, read); return -EINVAL; } /* * This routine prepares the Memory Controller for error injection. 
* The error will be injected when some process tries to write to the * memory that matches the given criteria. * The criteria can be set in terms of a mask where dimm, rank, bank, page * and col can be specified. * A -1 value for any of the mask items will make the MCU to ignore * that matching criteria for error injection. * * It should be noticed that the error will only happen after a write operation * on a memory that matches the condition. if REPEAT_EN is not enabled at * inject mask, then it will produce just one error. Otherwise, it will repeat * until the injectmask would be cleaned. * * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD * is reliable enough to check if the MC is using the * three channels. However, this is not clear at the datasheet. */ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; u64 mask = 0; int rc; long enable; if (!pvt->pci_ch[pvt->inject.channel][0]) return 0; rc = strict_strtoul(data, 10, &enable); if ((rc < 0)) return 0; if (enable) { pvt->inject.enable = 1; } else { disable_inject(mci); return count; } /* Sets pvt->inject.dimm mask */ if (pvt->inject.dimm < 0) mask |= 1LL << 41; else { if (pvt->channel[pvt->inject.channel].dimms > 2) mask |= (pvt->inject.dimm & 0x3LL) << 35; else mask |= (pvt->inject.dimm & 0x1LL) << 36; } /* Sets pvt->inject.rank mask */ if (pvt->inject.rank < 0) mask |= 1LL << 40; else { if (pvt->channel[pvt->inject.channel].dimms > 2) mask |= (pvt->inject.rank & 0x1LL) << 34; else mask |= (pvt->inject.rank & 0x3LL) << 34; } /* Sets pvt->inject.bank mask */ if (pvt->inject.bank < 0) mask |= 1LL << 39; else mask |= (pvt->inject.bank & 0x15LL) << 30; /* Sets pvt->inject.page mask */ if (pvt->inject.page < 0) mask |= 1LL << 38; else mask |= (pvt->inject.page & 0xffff) << 14; /* Sets pvt->inject.column mask */ if (pvt->inject.col < 0) mask |= 1LL << 37; else mask |= (pvt->inject.col & 
0x3fff); /* * bit 0: REPEAT_EN * bits 1-2: MASK_HALF_CACHELINE * bit 3: INJECT_ECC * bit 4: INJECT_ADDR_PARITY */ injectmask = (pvt->inject.type & 1) | (pvt->inject.section & 0x3) << 1 | (pvt->inject.type & 0x6) << (3 - 1); /* Unlock writes to registers - this register is write only */ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 0x2); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ADDR_MATCH, mask); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, injectmask); /* * This is something undocumented, based on my tests * Without writing 8 to this register, errors aren't injected. Not sure * why. */ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 8); debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," " inject 0x%08x\n", mask, pvt->inject.eccmask, injectmask); return count; } static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; if (!pvt->pci_ch[pvt->inject.channel][0]) return 0; pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, &injectmask); debugf0("Inject error read: 0x%018x\n", injectmask); if (injectmask & 0x0c) pvt->inject.enable = 1; return sprintf(data, "%d\n", pvt->inject.enable); } #define DECLARE_COUNTER(param) \ static ssize_t i7core_show_counter_##param( \ struct mem_ctl_info *mci, \ char *data) \ { \ struct i7core_pvt *pvt = mci->pvt_info; \ \ debugf1("%s() \n", __func__); \ if (!pvt->ce_count_available || (pvt->is_registered)) \ return sprintf(data, "data unavailable\n"); \ return sprintf(data, "%lu\n", \ pvt->udimm_ce_count[param]); \ } #define ATTR_COUNTER(param) \ { \ .attr = { \ .name = __stringify(udimm##param), \ .mode = (S_IRUGO | S_IWUSR) \ }, \ .show = 
i7core_show_counter_##param \ } DECLARE_COUNTER(0); DECLARE_COUNTER(1); DECLARE_COUNTER(2); /* * Sysfs struct */ static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { ATTR_ADDR_MATCH(channel), ATTR_ADDR_MATCH(dimm), ATTR_ADDR_MATCH(rank), ATTR_ADDR_MATCH(bank), ATTR_ADDR_MATCH(page), ATTR_ADDR_MATCH(col), { } /* End of list */ }; static const struct mcidev_sysfs_group i7core_inject_addrmatch = { .name = "inject_addrmatch", .mcidev_attr = i7core_addrmatch_attrs, }; static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { ATTR_COUNTER(0), ATTR_COUNTER(1), ATTR_COUNTER(2), { .attr = { .name = NULL } } }; static const struct mcidev_sysfs_group i7core_udimm_counters = { .name = "all_channel_counts", .mcidev_attr = i7core_udimm_counters_attrs, }; static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { { .attr = { .name = "inject_section", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_section_show, .store = i7core_inject_section_store, }, { .attr = { .name = "inject_type", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_type_show, .store = i7core_inject_type_store, }, { .attr = { .name = "inject_eccmask", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_eccmask_show, .store = i7core_inject_eccmask_store, }, { .grp = &i7core_inject_addrmatch, }, { .attr = { .name = "inject_enable", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_enable_show, .store = i7core_inject_enable_store, }, { } /* End of list */ }; static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { { .attr = { .name = "inject_section", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_section_show, .store = i7core_inject_section_store, }, { .attr = { .name = "inject_type", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_type_show, .store = i7core_inject_type_store, }, { .attr = { .name = "inject_eccmask", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_eccmask_show, .store = i7core_inject_eccmask_store, 
}, { .grp = &i7core_inject_addrmatch, }, { .attr = { .name = "inject_enable", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_enable_show, .store = i7core_inject_enable_store, }, { .grp = &i7core_udimm_counters, }, { } /* End of list */ }; /**************************************************************************** Device initialization routines: put/get, init/exit ****************************************************************************/ /* * i7core_put_all_devices 'put' all the devices that we have * reserved via 'get' */ static void i7core_put_devices(struct i7core_dev *i7core_dev) { int i; debugf0(__FILE__ ": %s()\n", __func__); for (i = 0; i < i7core_dev->n_devs; i++) { struct pci_dev *pdev = i7core_dev->pdev[i]; if (!pdev) continue; debugf0("Removing dev %02x:%02x.%d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); pci_dev_put(pdev); } } static void i7core_put_all_devices(void) { struct i7core_dev *i7core_dev, *tmp; list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) { i7core_put_devices(i7core_dev); free_i7core_dev(i7core_dev); } } static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table) { struct pci_dev *pdev = NULL; int i; /* * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses * aren't announced by acpi. 
So, we need to use a legacy scan probing * to detect them */ while (table && table->descr) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL); if (unlikely(!pdev)) { for (i = 0; i < MAX_SOCKET_BUSES; i++) pcibios_scan_specific_bus(255-i); } pci_dev_put(pdev); table++; } } static unsigned i7core_pci_lastbus(void) { int last_bus = 0, bus; struct pci_bus *b = NULL; while ((b = pci_find_next_bus(b)) != NULL) { bus = b->number; debugf0("Found bus %d\n", bus); if (bus > last_bus) last_bus = bus; } debugf0("Last bus %d\n", last_bus); return last_bus; } /* * i7core_get_all_devices Find and perform 'get' operation on the MCH's * device/functions we want to reference for this driver * * Need to 'get' device 16 func 1 and func 2 */ static int i7core_get_onedevice(struct pci_dev **prev, const struct pci_id_table *table, const unsigned devno, const unsigned last_bus) { struct i7core_dev *i7core_dev; const struct pci_id_descr *dev_descr = &table->descr[devno]; struct pci_dev *pdev = NULL; u8 bus = 0; u8 socket = 0; pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_descr->dev_id, *prev); if (!pdev) { if (*prev) { *prev = pdev; return 0; } if (dev_descr->optional) return 0; if (devno == 0) return -ENODEV; i7core_printk(KERN_INFO, "Device not found: dev %02x.%d PCI ID %04x:%04x\n", dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* End of list, leave */ return -ENODEV; } bus = pdev->bus->number; socket = last_bus - bus; i7core_dev = get_i7core_dev(socket); if (!i7core_dev) { i7core_dev = alloc_i7core_dev(socket, table); if (!i7core_dev) { pci_dev_put(pdev); return -ENOMEM; } } if (i7core_dev->pdev[devno]) { i7core_printk(KERN_ERR, "Duplicated device for " "dev %02x:%02x.%d PCI ID %04x:%04x\n", bus, dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); pci_dev_put(pdev); return -ENODEV; } i7core_dev->pdev[devno] = pdev; /* Sanity check */ if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev || PCI_FUNC(pdev->devfn) 
!= dev_descr->func)) { i7core_printk(KERN_ERR, "Device PCI ID %04x:%04x " "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id, bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), bus, dev_descr->dev, dev_descr->func); return -ENODEV; } /* Be sure that the device is enabled */ if (unlikely(pci_enable_device(pdev) < 0)) { i7core_printk(KERN_ERR, "Couldn't enable " "dev %02x:%02x.%d PCI ID %04x:%04x\n", bus, dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); return -ENODEV; } debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", socket, bus, dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* * As stated on drivers/pci/search.c, the reference count for * @from is always decremented if it is not %NULL. So, as we need * to get all devices up to null, we need to do a get for the device */ pci_dev_get(pdev); *prev = pdev; return 0; } static int i7core_get_all_devices(void) { int i, rc, last_bus; struct pci_dev *pdev = NULL; const struct pci_id_table *table = pci_dev_table; last_bus = i7core_pci_lastbus(); while (table && table->descr) { for (i = 0; i < table->n_devs; i++) { pdev = NULL; do { rc = i7core_get_onedevice(&pdev, table, i, last_bus); if (rc < 0) { if (i == 0) { i = table->n_devs; break; } i7core_put_all_devices(); return -ENODEV; } } while (pdev); } table++; } return 0; } static int mci_bind_devs(struct mem_ctl_info *mci, struct i7core_dev *i7core_dev) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; int i, func, slot; pvt->is_registered = 0; for (i = 0; i < i7core_dev->n_devs; i++) { pdev = i7core_dev->pdev[i]; if (!pdev) continue; func = PCI_FUNC(pdev->devfn); slot = PCI_SLOT(pdev->devfn); if (slot == 3) { if (unlikely(func > MAX_MCR_FUNC)) goto error; pvt->pci_mcr[func] = pdev; } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) { if (unlikely(func > MAX_CHAN_FUNC)) goto error; pvt->pci_ch[slot - 4][func] = pdev; } else if (!slot && !func) 
pvt->pci_noncore = pdev; else goto error; debugf0("Associated fn %d.%d, dev = %p, socket %d\n", PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev, i7core_dev->socket); if (PCI_SLOT(pdev->devfn) == 3 && PCI_FUNC(pdev->devfn) == 2) pvt->is_registered = 1; } return 0; error: i7core_printk(KERN_ERR, "Device %d, function %d " "is out of the expected range\n", slot, func); return -EINVAL; } /**************************************************************************** Error check routines ****************************************************************************/ static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, const int chan, const int dimm, const int add) { char *msg; struct i7core_pvt *pvt = mci->pvt_info; int row = pvt->csrow_map[chan][dimm], i; for (i = 0; i < add; i++) { msg = kasprintf(GFP_KERNEL, "Corrected error " "(Socket=%d channel=%d dimm=%d)", pvt->i7core_dev->socket, chan, dimm); edac_mc_handle_fbd_ce(mci, row, 0, msg); kfree (msg); } } static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, const int chan, const int new0, const int new1, const int new2) { struct i7core_pvt *pvt = mci->pvt_info; int add0 = 0, add1 = 0, add2 = 0; /* Updates CE counters if it is not the first time here */ if (pvt->ce_count_available) { /* Updates CE counters */ add2 = new2 - pvt->rdimm_last_ce_count[chan][2]; add1 = new1 - pvt->rdimm_last_ce_count[chan][1]; add0 = new0 - pvt->rdimm_last_ce_count[chan][0]; if (add2 < 0) add2 += 0x7fff; pvt->rdimm_ce_count[chan][2] += add2; if (add1 < 0) add1 += 0x7fff; pvt->rdimm_ce_count[chan][1] += add1; if (add0 < 0) add0 += 0x7fff; pvt->rdimm_ce_count[chan][0] += add0; } else pvt->ce_count_available = 1; /* Store the new values */ pvt->rdimm_last_ce_count[chan][2] = new2; pvt->rdimm_last_ce_count[chan][1] = new1; pvt->rdimm_last_ce_count[chan][0] = new0; /*updated the edac core */ if (add0 != 0) i7core_rdimm_update_csrow(mci, chan, 0, add0); if (add1 != 0) i7core_rdimm_update_csrow(mci, chan, 1, add1); if (add2 
!= 0) i7core_rdimm_update_csrow(mci, chan, 2, add2); } static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 rcv[3][2]; int i, new0, new1, new2; /*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0, &rcv[0][0]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1, &rcv[0][1]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2, &rcv[1][0]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3, &rcv[1][1]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4, &rcv[2][0]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, &rcv[2][1]); for (i = 0 ; i < 3; i++) { debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); /*if the channel has 3 dimms*/ if (pvt->channel[i].dimms > 2) { new0 = DIMM_BOT_COR_ERR(rcv[i][0]); new1 = DIMM_TOP_COR_ERR(rcv[i][0]); new2 = DIMM_BOT_COR_ERR(rcv[i][1]); } else { new0 = DIMM_TOP_COR_ERR(rcv[i][0]) + DIMM_BOT_COR_ERR(rcv[i][0]); new1 = DIMM_TOP_COR_ERR(rcv[i][1]) + DIMM_BOT_COR_ERR(rcv[i][1]); new2 = 0; } i7core_rdimm_update_ce_count(mci, i, new0, new1, new2); } } /* This function is based on the device 3 function 4 registers as described on: * Intel Xeon Processor 5500 Series Datasheet Volume 2 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf * also available at: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 rcv1, rcv0; int new0, new1, new2; if (!pvt->pci_mcr[4]) { debugf0("%s MCR registers not found\n", __func__); return; } /* Corrected test errors */ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1); pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0); /* Store the new values */ new2 = DIMM2_COR_ERR(rcv1); new1 = DIMM1_COR_ERR(rcv0); new0 = DIMM0_COR_ERR(rcv0); /* Updates 
CE counters if it is not the first time here */ if (pvt->ce_count_available) { /* Updates CE counters */ int add0, add1, add2; add2 = new2 - pvt->udimm_last_ce_count[2]; add1 = new1 - pvt->udimm_last_ce_count[1]; add0 = new0 - pvt->udimm_last_ce_count[0]; if (add2 < 0) add2 += 0x7fff; pvt->udimm_ce_count[2] += add2; if (add1 < 0) add1 += 0x7fff; pvt->udimm_ce_count[1] += add1; if (add0 < 0) add0 += 0x7fff; pvt->udimm_ce_count[0] += add0; if (add0 | add1 | add2) i7core_printk(KERN_ERR, "New Corrected error(s): " "dimm0: +%d, dimm1: +%d, dimm2 +%d\n", add0, add1, add2); } else pvt->ce_count_available = 1; /* Store the new values */ pvt->udimm_last_ce_count[2] = new2; pvt->udimm_last_ce_count[1] = new1; pvt->udimm_last_ce_count[0] = new0; } /* * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32 * Architectures Software Developer’s Manual Volume 3B. * Nehalem are defined as family 0x06, model 0x1a * * The MCA registers used here are the following ones: * struct mce field MCA Register * m->status MSR_IA32_MC8_STATUS * m->addr MSR_IA32_MC8_ADDR * m->misc MSR_IA32_MC8_MISC * In the case of Nehalem, the error information is masked at .status and .misc * fields */ static void i7core_mce_output_error(struct mem_ctl_info *mci, const struct mce *m) { struct i7core_pvt *pvt = mci->pvt_info; char *type, *optype, *err, *msg; unsigned long error = m->status & 0x1ff0000l; u32 optypenum = (m->status >> 4) & 0x07; u32 core_err_cnt = (m->status >> 38) && 0x7fff; u32 dimm = (m->misc >> 16) & 0x3; u32 channel = (m->misc >> 18) & 0x3; u32 syndrome = m->misc >> 32; u32 errnum = find_first_bit(&error, 32); int csrow; if (m->mcgstatus & 1) type = "FATAL"; else type = "NON_FATAL"; switch (optypenum) { case 0: optype = "generic undef request"; break; case 1: optype = "read error"; break; case 2: optype = "write error"; break; case 3: optype = "addr/cmd error"; break; case 4: optype = "scrubbing error"; break; default: optype = "reserved"; break; } switch (errnum) { 
case 16: err = "read ECC error"; break; case 17: err = "RAS ECC error"; break; case 18: err = "write parity error"; break; case 19: err = "redundacy loss"; break; case 20: err = "reserved"; break; case 21: err = "memory range error"; break; case 22: err = "RTID out of range"; break; case 23: err = "address parity error"; break; case 24: err = "byte enable parity error"; break; default: err = "unknown"; } /* FIXME: should convert addr into bank and rank information */ msg = kasprintf(GFP_ATOMIC, "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, " "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n", type, (long long) m->addr, m->cpu, dimm, channel, syndrome, core_err_cnt, (long long)m->status, (long long)m->misc, optype, err); debugf0("%s", msg); csrow = pvt->csrow_map[channel][dimm]; /* Call the helper to output message */ if (m->mcgstatus & 1) edac_mc_handle_fbd_ue(mci, csrow, 0, 0 /* FIXME: should be channel here */, msg); else if (!pvt->is_registered) edac_mc_handle_fbd_ce(mci, csrow, 0 /* FIXME: should be channel here */, msg); kfree(msg); } /* * i7core_check_error Retrieve and process errors reported by the * hardware. Called by the Core module. */ static void i7core_check_error(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; int i; unsigned count = 0; struct mce *m; /* * MCE first step: Copy all mce errors into a temporary buffer * We use a double buffering here, to reduce the risk of * losing an error. 
*/ smp_rmb(); count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) % MCE_LOG_LEN; if (!count) goto check_ce_error; m = pvt->mce_outentry; if (pvt->mce_in + count > MCE_LOG_LEN) { unsigned l = MCE_LOG_LEN - pvt->mce_in; memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); smp_wmb(); pvt->mce_in = 0; count -= l; m += l; } memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); smp_wmb(); pvt->mce_in += count; smp_rmb(); if (pvt->mce_overrun) { i7core_printk(KERN_ERR, "Lost %d memory errors\n", pvt->mce_overrun); smp_wmb(); pvt->mce_overrun = 0; } /* * MCE second step: parse errors and display */ for (i = 0; i < count; i++) i7core_mce_output_error(mci, &pvt->mce_outentry[i]); /* * Now, let's increment CE error counts */ check_ce_error: if (!pvt->is_registered) i7core_udimm_check_mc_ecc_err(mci); else i7core_rdimm_check_mc_ecc_err(mci); } /* * i7core_mce_check_error Replicates mcelog routine to get errors * This routine simply queues mcelog errors, and * return. The error itself should be handled later * by i7core_check_error. * WARNING: As this routine should be called at NMI time, extra care should * be taken to avoid deadlocks, and to be as fast as possible. 
*/ static int i7core_mce_check_error(void *priv, struct mce *mce) { struct mem_ctl_info *mci = priv; struct i7core_pvt *pvt = mci->pvt_info; /* * Just let mcelog handle it if the error is * outside the memory controller */ if (((mce->status & 0xffff) >> 7) != 1) return 0; /* Bank 8 registers are the only ones that we know how to handle */ if (mce->bank != 8) return 0; #ifdef CONFIG_SMP /* Only handle if it is the right mc controller */ if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket) return 0; #endif smp_rmb(); if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { smp_wmb(); pvt->mce_overrun++; return 0; } /* Copy memory error at the ringbuffer */ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce)); smp_wmb(); pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN; /* Handle fatal errors immediately */ if (mce->mcgstatus & 1) i7core_check_error(mci); /* Advise mcelog that the errors were handled */ return 1; } static void i7core_pci_ctl_create(struct i7core_pvt *pvt) { pvt->i7core_pci = edac_pci_create_generic_ctl( &pvt->i7core_dev->pdev[0]->dev, EDAC_MOD_STR); if (unlikely(!pvt->i7core_pci)) pr_warn("Unable to setup PCI error report via EDAC\n"); } static void i7core_pci_ctl_release(struct i7core_pvt *pvt) { if (likely(pvt->i7core_pci)) edac_pci_release_generic_ctl(pvt->i7core_pci); else i7core_printk(KERN_ERR, "Couldn't find mem_ctl_info for socket %d\n", pvt->i7core_dev->socket); pvt->i7core_pci = NULL; } static void i7core_unregister_mci(struct i7core_dev *i7core_dev) { struct mem_ctl_info *mci = i7core_dev->mci; struct i7core_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { debugf0("MC: " __FILE__ ": %s(): dev = %p\n", __func__, &i7core_dev->pdev[0]->dev); i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); return; } pvt = mci->pvt_info; debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &i7core_dev->pdev[0]->dev); /* Disable MCE NMI handler */ edac_mce_unregister(&pvt->edac_mce); /* Disable EDAC polling */ 
i7core_pci_ctl_release(pvt); /* Remove MC sysfs nodes */ edac_mc_del_mc(mci->dev); debugf1("%s: free mci struct\n", mci->ctl_name); kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; } static int i7core_register_mci(struct i7core_dev *i7core_dev) { struct mem_ctl_info *mci; struct i7core_pvt *pvt; int rc, channels, csrows; /* Check the number of active and not disabled channels */ rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows); if (unlikely(rc < 0)) return rc; /* allocate a new MC control structure */ mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); if (unlikely(!mci)) return -ENOMEM; debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &i7core_dev->pdev[0]->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); /* Associates i7core_dev and mci for future usage */ pvt->i7core_dev = i7core_dev; i7core_dev->mci = mci; /* * FIXME: how to handle RDDR3 at MCI level? It is possible to have * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different * memory channels */ mci->mtype_cap = MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "i7core_edac.c"; mci->mod_ver = I7CORE_REVISION; mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket); mci->dev_name = pci_name(i7core_dev->pdev[0]); mci->ctl_page_to_phys = NULL; /* Store pci devices at mci for faster access */ rc = mci_bind_devs(mci, i7core_dev); if (unlikely(rc < 0)) goto fail0; if (pvt->is_registered) mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs; else mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs; /* Get dimm basic config */ get_dimm_config(mci); /* record ptr to the generic device */ mci->dev = &i7core_dev->pdev[0]->dev; /* Set the function pointer to an actual operation function */ mci->edac_check = i7core_check_error; /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { debugf0("MC: " 
__FILE__ ": %s(): failed edac_mc_add_mc()\n", __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ rc = -EINVAL; goto fail0; } /* Default error mask is any memory */ pvt->inject.channel = 0; pvt->inject.dimm = -1; pvt->inject.rank = -1; pvt->inject.bank = -1; pvt->inject.page = -1; pvt->inject.col = -1; /* allocating generic PCI control info */ i7core_pci_ctl_create(pvt); /* Registers on edac_mce in order to receive memory errors */ pvt->edac_mce.priv = mci; pvt->edac_mce.check_error = i7core_mce_check_error; rc = edac_mce_register(&pvt->edac_mce); if (unlikely(rc < 0)) { debugf0("MC: " __FILE__ ": %s(): failed edac_mce_register()\n", __func__); goto fail1; } return 0; fail1: i7core_pci_ctl_release(pvt); edac_mc_del_mc(mci->dev); fail0: kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; return rc; } /* * i7core_probe Probe for ONE instance of device to see if it is * present. * return: * 0 for FOUND a device * < 0 for error code */ static int __devinit i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int rc; struct i7core_dev *i7core_dev; /* get the pci devices we want to reserve for our use */ mutex_lock(&i7core_edac_lock); /* * All memory controllers are allocated at the first pass. 
*/ if (unlikely(probed >= 1)) { mutex_unlock(&i7core_edac_lock); return -ENODEV; } probed++; rc = i7core_get_all_devices(); if (unlikely(rc < 0)) goto fail0; list_for_each_entry(i7core_dev, &i7core_edac_list, list) { rc = i7core_register_mci(i7core_dev); if (unlikely(rc < 0)) goto fail1; } i7core_printk(KERN_INFO, "Driver loaded.\n"); mutex_unlock(&i7core_edac_lock); return 0; fail1: list_for_each_entry(i7core_dev, &i7core_edac_list, list) i7core_unregister_mci(i7core_dev); i7core_put_all_devices(); fail0: mutex_unlock(&i7core_edac_lock); return rc; } /* * i7core_remove destructor for one instance of device * */ static void __devexit i7core_remove(struct pci_dev *pdev) { struct i7core_dev *i7core_dev; debugf0(__FILE__ ": %s()\n", __func__); /* * we have a trouble here: pdev value for removal will be wrong, since * it will point to the X58 register used to detect that the machine * is a Nehalem or upper design. However, due to the way several PCI * devices are grouped together to provide MC functionality, we need * to use a different method for releasing the devices */ mutex_lock(&i7core_edac_lock); if (unlikely(!probed)) { mutex_unlock(&i7core_edac_lock); return; } list_for_each_entry(i7core_dev, &i7core_edac_list, list) i7core_unregister_mci(i7core_dev); /* Release PCI resources */ i7core_put_all_devices(); probed--; mutex_unlock(&i7core_edac_lock); } MODULE_DEVICE_TABLE(pci, i7core_pci_tbl); /* * i7core_driver pci_driver structure for this module * */ static struct pci_driver i7core_driver = { .name = "i7core_edac", .probe = i7core_probe, .remove = __devexit_p(i7core_remove), .id_table = i7core_pci_tbl, }; /* * i7core_init Module entry function * Try to initialize this module for its devices */ static int __init i7core_init(void) { int pci_rc; debugf2("MC: " __FILE__ ": %s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); if (use_pci_fixup) i7core_xeon_pci_fixup(pci_dev_table); pci_rc = 
pci_register_driver(&i7core_driver); if (pci_rc >= 0) return 0; i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", pci_rc); return pci_rc; } /* * i7core_exit() Module exit function * Unregister the driver */ static void __exit i7core_exit(void) { debugf2("MC: " __FILE__ ": %s()\n", __func__); pci_unregister_driver(&i7core_driver); } module_init(i7core_init); module_exit(i7core_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - " I7CORE_REVISION); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
jiangbeilengyu/famkernel
drivers/staging/gma500/psb_intel_sdvo.c
150
36324
/* * Copyright (c) 2006-2007 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/delay.h> /* #include <drm/drm_crtc.h> */ #include <drm/drmP.h> #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "psb_intel_sdvo_regs.h" struct psb_intel_sdvo_priv { struct psb_intel_i2c_chan *i2c_bus; int slaveaddr; int output_device; u16 active_outputs; struct psb_intel_sdvo_caps caps; int pixel_clock_min, pixel_clock_max; int save_sdvo_mult; u16 save_active_outputs; struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; struct psb_intel_sdvo_dtd save_output_dtd[16]; u32 save_SDVOX; u8 in_out_map[4]; u8 by_input_wiring; u32 active_device; }; /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val) { struct drm_device *dev = psb_intel_output->base.dev; struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u32 bval = val, cval = val; int i; if (sdvo_priv->output_device == SDVOB) cval = REG_READ(SDVOC); else bval = REG_READ(SDVOB); /* * Write the registers twice for luck. Sometimes, * writing them only once doesn't appear to 'stick'. 
* The BIOS does this too. Yay, magic */ for (i = 0; i < 2; i++) { REG_WRITE(SDVOB, bval); REG_READ(SDVOB); REG_WRITE(SDVOC, cval); REG_READ(SDVOC); } } static bool psb_intel_sdvo_read_byte( struct psb_intel_output *psb_intel_output, u8 addr, u8 *ch) { struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u8 out_buf[2]; u8 buf[2]; int ret; struct i2c_msg msgs[] = { { .addr = sdvo_priv->i2c_bus->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = sdvo_priv->i2c_bus->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = buf, } }; out_buf[0] = addr; out_buf[1] = 0; ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2); if (ret == 2) { /* DRM_DEBUG("got back from addr %02X = %02x\n", * out_buf[0], buf[0]); */ *ch = buf[0]; return true; } DRM_DEBUG("i2c transfer returned %d\n", ret); return false; } static bool psb_intel_sdvo_write_byte( struct psb_intel_output *psb_intel_output, int addr, u8 ch) { u8 out_buf[2]; struct i2c_msg msgs[] = { { .addr = psb_intel_output->i2c_bus->slave_addr, .flags = 0, .len = 2, .buf = out_buf, } }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1) return true; return false; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY 
(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),}; #define SDVO_NAME(dev_priv) \ ((dev_priv)->output_device == SDVOB ? 
"SDVOB" : "SDVOC") #define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv) static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd, void *args, int args_len) { struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; int i; if (1) { DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); for (i = 0; i < args_len; i++) printk(KERN_INFO"%02X ", ((u8 *) args)[i]); for (; i < 8; i++) printk(" "); for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { if (cmd == sdvo_cmd_names[i].cmd) { printk("(%s)", sdvo_cmd_names[i].name); break; } } if (i == sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0])) printk("(%02X)", cmd); printk("\n"); } for (i = 0; i < args_len; i++) { psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i, ((u8 *) args)[i]); } psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd); } static const char *const cmd_status_names[] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported" }; static u8 psb_intel_sdvo_read_response( struct psb_intel_output *psb_intel_output, void *response, int response_len) { struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; int i; u8 status; u8 retry = 50; while (retry--) { /* Read the command response */ for (i = 0; i < response_len; i++) { psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_RETURN_0 + i, &((u8 *) response)[i]); } /* read the return status */ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS, &status); if (1) { DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv)); for (i = 0; i < response_len; i++) printk(KERN_INFO"%02X ", ((u8 *) response)[i]); for (; i < 8; i++) printk(" "); if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) printk(KERN_INFO"(%s)", cmd_status_names[status]); else printk(KERN_INFO"(??? 
%d)", status); printk("\n"); } if (status != SDVO_CMD_STATUS_PENDING) return status; mdelay(50); } return status; } int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) { if (mode->clock >= 100000) return 1; else if (mode->clock >= 50000) return 2; else return 4; } /** * Don't check status code from this as it switches the bus back to the * SDVO chips which defeats the purpose of doing a bus switch in the first * place. */ void psb_intel_sdvo_set_control_bus_switch( struct psb_intel_output *psb_intel_output, u8 target) { psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); } static bool psb_intel_sdvo_set_target_input( struct psb_intel_output *psb_intel_output, bool target_0, bool target_1) { struct psb_intel_sdvo_set_target_input_args targets = { 0 }; u8 status; if (target_0 && target_1) return SDVO_CMD_STATUS_NOTSUPP; if (target_1) targets.target_1 = 1; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, sizeof(targets)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } /** * Return whether each input is trained. * * This function is making an assumption about the layout of the response, * which should be checked against the docs. 
*/ static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output *psb_intel_output, bool *input_1, bool *input_2) { struct psb_intel_sdvo_get_trained_inputs_response response; u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, sizeof(response)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; *input_1 = response.input0_trained; *input_2 = response.input1_trained; return true; } static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output *psb_intel_output, u16 *outputs) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, outputs, sizeof(*outputs)); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output *psb_intel_output, u16 outputs) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, sizeof(outputs)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output *psb_intel_output, int mode) { u8 status, state = SDVO_ENCODER_STATE_ON; switch (mode) { case DRM_MODE_DPMS_ON: state = SDVO_ENCODER_STATE_ON; break; case DRM_MODE_DPMS_STANDBY: state = SDVO_ENCODER_STATE_STANDBY; break; case DRM_MODE_DPMS_SUSPEND: state = SDVO_ENCODER_STATE_SUSPEND; break; case DRM_MODE_DPMS_OFF: state = SDVO_ENCODER_STATE_OFF; break; } psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output *psb_intel_output, int *clock_min, int *clock_max) { struct psb_intel_sdvo_pixel_clock_range clocks; u8 status; 
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &clocks, sizeof(clocks)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; /* Convert the values from units of 10 kHz to kHz. */ *clock_min = clocks.min * 10; *clock_max = clocks.max * 10; return true; } static bool psb_intel_sdvo_set_target_output( struct psb_intel_output *psb_intel_output, u16 outputs) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, sizeof(outputs)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output, u8 cmd, struct psb_intel_sdvo_dtd *dtd) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, sizeof(dtd->part1)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, sizeof(dtd->part2)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } static bool psb_intel_sdvo_get_input_timing( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_dtd *dtd) { return psb_intel_sdvo_get_timing(psb_intel_output, SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); } static bool psb_intel_sdvo_set_timing( struct psb_intel_output *psb_intel_output, u8 cmd, struct psb_intel_sdvo_dtd *dtd) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return 
true; } static bool psb_intel_sdvo_set_input_timing( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_dtd *dtd) { return psb_intel_sdvo_set_timing(psb_intel_output, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } static bool psb_intel_sdvo_set_output_timing( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_dtd *dtd) { return psb_intel_sdvo_set_timing(psb_intel_output, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output *psb_intel_output) { u8 response, status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); return SDVO_CLOCK_RATE_MULT_1X; } else { DRM_DEBUG("Current clock rate multiplier: %d\n", response); } return response; } static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output *psb_intel_output, u8 val) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output, u32 in0outputmask, u32 in1outputmask) { u8 byArgs[4]; u8 status; int i; struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv; /* Make all fields of the args/ret to zero */ memset(byArgs, 0, sizeof(byArgs)); /* Fill up the argument values; */ byArgs[0] = (u8) (in0outputmask & 0xFF); byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF); byArgs[2] = (u8) (in1outputmask & 0xFF); byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF); /*save inoutmap arg here*/ for (i = 0; i < 4; i++) sdvo_priv->in_out_map[i] = byArgs[0]; psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4); status = psb_intel_sdvo_read_response(output, NULL, 0); if (status != 
SDVO_CMD_STATUS_SUCCESS) return false; return true; } static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output) { u32 dwCurrentSDVOIn0 = 0; u32 dwCurrentSDVOIn1 = 0; u32 dwDevMask = 0; struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv; /* Please DO NOT change the following code. */ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) { switch (sdvo_priv->active_device) { case SDVO_DEVICE_LVDS: dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1; break; case SDVO_DEVICE_TMDS: dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1; break; case SDVO_DEVICE_TV: dwDevMask = SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 | SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1; break; case SDVO_DEVICE_CRT: dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1; break; } dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask); } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) { switch (sdvo_priv->active_device) { case SDVO_DEVICE_LVDS: dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1; break; case SDVO_DEVICE_TMDS: dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1; break; case SDVO_DEVICE_TV: dwDevMask = SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 | SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1; break; case SDVO_DEVICE_CRT: dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1; break; } dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask); } psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0, dwCurrentSDVOIn1); } static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO * device will be told of the multiplier during mode_set. 
*/ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode); return true; } static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u16 width, height; u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; u16 h_sync_offset, v_sync_offset; u32 sdvox; struct psb_intel_sdvo_dtd output_dtd; int sdvo_pixel_multiply; if (!mode) return; psb_intel_sdvo_set_target_output(psb_intel_output, 0); width = mode->crtc_hdisplay; height = mode->crtc_vdisplay; /* do some mode translations */ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; output_dtd.part1.clock = mode->clock / 10; output_dtd.part1.h_active = width & 0xff; output_dtd.part1.h_blank = h_blank_len & 0xff; output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | ((h_blank_len >> 8) & 0xf); output_dtd.part1.v_active = height & 0xff; output_dtd.part1.v_blank = v_blank_len & 0xff; output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | ((v_blank_len >> 8) & 0xf); output_dtd.part2.h_sync_off = h_sync_offset; output_dtd.part2.h_sync_width = h_sync_len & 0xff; output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | (v_sync_len & 0xf); output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); output_dtd.part2.dtd_flags = 
0x18; if (mode->flags & DRM_MODE_FLAG_PHSYNC) output_dtd.part2.dtd_flags |= 0x2; if (mode->flags & DRM_MODE_FLAG_PVSYNC) output_dtd.part2.dtd_flags |= 0x4; output_dtd.part2.sdvo_flags = 0; output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; output_dtd.part2.reserved = 0; /* Set the output timing to the screen */ psb_intel_sdvo_set_target_output(psb_intel_output, sdvo_priv->active_outputs); /* Set the input timing to the screen. Assume always input 0. */ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd); /* We would like to use i830_sdvo_create_preferred_input_timing() to * provide the device with a timing it can support, if it supports that * feature. However, presumably we would need to adjust the CRTC to * output the preferred timing, and we don't support that currently. */ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd); switch (psb_intel_sdvo_get_pixel_multiplier(mode)) { case 1: psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, SDVO_CLOCK_RATE_MULT_1X); break; case 2: psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, SDVO_CLOCK_RATE_MULT_2X); break; case 4: psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, SDVO_CLOCK_RATE_MULT_4X); break; } /* Set the SDVO control regs. 
*/ sdvox = REG_READ(sdvo_priv->output_device); switch (sdvo_priv->output_device) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; case SDVOC: sdvox &= SDVOC_PRESERVE_MASK; break; } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; if (psb_intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode); psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox); psb_intel_sdvo_set_iomap(psb_intel_output); } static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u32 temp; if (mode != DRM_MODE_DPMS_ON) { psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); if (0) psb_intel_sdvo_set_encoder_power_state( psb_intel_output, mode); if (mode == DRM_MODE_DPMS_OFF) { temp = REG_READ(sdvo_priv->output_device); if ((temp & SDVO_ENABLE) != 0) { psb_intel_sdvo_write_sdvox(psb_intel_output, temp & ~SDVO_ENABLE); } } } else { bool input1, input2; int i; u8 status; temp = REG_READ(sdvo_priv->output_device); if ((temp & SDVO_ENABLE) == 0) psb_intel_sdvo_write_sdvox(psb_intel_output, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) psb_intel_wait_for_vblank(dev); status = psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. 
*/ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { DRM_DEBUG ("First %s output reported failure to sync\n", SDVO_NAME(sdvo_priv)); } if (0) psb_intel_sdvo_set_encoder_power_state( psb_intel_output, mode); psb_intel_sdvo_set_active_outputs(psb_intel_output, sdvo_priv->active_outputs); } return; } static void psb_intel_sdvo_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; /*int o;*/ sdvo_priv->save_sdvo_mult = psb_intel_sdvo_get_clock_rate_mult(psb_intel_output); psb_intel_sdvo_get_active_outputs(psb_intel_output, &sdvo_priv->save_active_outputs); if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_get_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_1); } if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { psb_intel_sdvo_set_target_input(psb_intel_output, false, true); psb_intel_sdvo_get_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_2); } sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device); /*TODO: save the in_out_map state*/ } static void psb_intel_sdvo_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; /*int o;*/ int i; bool input1, input2; u8 status; psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_set_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_1); } if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { psb_intel_sdvo_set_target_input(psb_intel_output, false, true); psb_intel_sdvo_set_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_2); } 
psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, sdvo_priv->save_sdvo_mult); REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); if (sdvo_priv->save_SDVOX & SDVO_ENABLE) { for (i = 0; i < 2; i++) psb_intel_wait_for_vblank(dev); status = psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, &input2); if (status == SDVO_CMD_STATUS_SUCCESS && !input1) DRM_DEBUG ("First %s output reported failure to sync\n", SDVO_NAME(sdvo_priv)); } psb_intel_sdvo_set_active_outputs(psb_intel_output, sdvo_priv->save_active_outputs); /*TODO: restore in_out_map*/ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_IN_OUT_MAP, sdvo_priv->in_out_map, 4); psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); } static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (sdvo_priv->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; if (sdvo_priv->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; return MODE_OK; } static bool psb_intel_sdvo_get_capabilities( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_caps *caps) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB) { struct drm_connector *connector = NULL; struct psb_intel_output *iout = NULL; struct psb_intel_sdvo_priv *sdvo; /* find the sdvo connector */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { iout = to_psb_intel_output(connector); if (iout->type != INTEL_OUTPUT_SDVO) continue; sdvo = iout->dev_priv; if (sdvo->output_device == SDVOB && sdvoB) 
return connector; if (sdvo->output_device == SDVOC && !sdvoB) return connector; } return NULL; } int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector) { u8 response[2]; u8 status; struct psb_intel_output *psb_intel_output; DRM_DEBUG("\n"); if (!connector) return 0; psb_intel_output = to_psb_intel_output(connector); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); if (response[0] != 0) return 1; return 0; } void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on) { u8 response[2]; u8 status; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); psb_intel_sdvo_read_response(psb_intel_output, &response, 2); if (on) { psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } else { response[0] = 0; response[1] = 0; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); psb_intel_sdvo_read_response(psb_intel_output, &response, 2); } static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector *connector, bool force) { u8 response[2]; u8 status; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); if ((response[0] != 0) || (response[1] != 0)) return connector_status_connected; else return connector_status_disconnected; } static int psb_intel_sdvo_get_modes(struct drm_connector 
*connector) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); /* set the bus switch and get the modes */ psb_intel_sdvo_set_control_bus_switch(psb_intel_output, SDVO_CONTROL_BUS_DDC2); psb_intel_ddc_get_modes(psb_intel_output); if (list_empty(&connector->probed_modes)) return 0; return 1; } static void psb_intel_sdvo_destroy(struct drm_connector *connector) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); if (psb_intel_output->i2c_bus) psb_intel_i2c_destroy(psb_intel_output->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(psb_intel_output); } static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { .dpms = psb_intel_sdvo_dpms, .mode_fixup = psb_intel_sdvo_mode_fixup, .prepare = psb_intel_encoder_prepare, .mode_set = psb_intel_sdvo_mode_set, .commit = psb_intel_encoder_commit, }; static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = psb_intel_sdvo_save, .restore = psb_intel_sdvo_restore, .detect = psb_intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = psb_intel_sdvo_destroy, }; static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = { .get_modes = psb_intel_sdvo_get_modes, .mode_valid = psb_intel_sdvo_mode_valid, .best_encoder = psb_intel_best_encoder, }; void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { .destroy = psb_intel_sdvo_enc_destroy, }; void psb_intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; struct psb_intel_output *psb_intel_output; struct psb_intel_sdvo_priv *sdvo_priv; struct psb_intel_i2c_chan *i2cbus = NULL; int connector_type; u8 ch[0x40]; int i; int encoder_type, output_id; psb_intel_output = kcalloc(sizeof(struct psb_intel_output) + sizeof(struct 
psb_intel_sdvo_priv), 1, GFP_KERNEL); if (!psb_intel_output) return; connector = &psb_intel_output->base; drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs, DRM_MODE_CONNECTOR_Unknown); drm_connector_helper_add(connector, &psb_intel_sdvo_connector_helper_funcs); sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1); psb_intel_output->type = INTEL_OUTPUT_SDVO; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; /* setup the DDC bus. */ if (output_device == SDVOB) i2cbus = psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); else i2cbus = psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); if (!i2cbus) goto err_connector; sdvo_priv->i2c_bus = i2cbus; if (output_device == SDVOB) { output_id = 1; sdvo_priv->by_input_wiring = SDVOB_IN0; sdvo_priv->i2c_bus->slave_addr = 0x38; } else { output_id = 2; sdvo_priv->i2c_bus->slave_addr = 0x39; } sdvo_priv->output_device = output_device; psb_intel_output->i2c_bus = i2cbus; psb_intel_output->dev_priv = sdvo_priv; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) { DRM_DEBUG("No SDVO device found on SDVO%c\n", output_device == SDVOB ? 'B' : 'C'); goto err_i2c; } } psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps); memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs)); /* TODO, CVBS, SVID, YPRPB & SCART outputs. 
*/ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) { sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; sdvo_priv->active_device = SDVO_DEVICE_CRT; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_DAC; connector_type = DRM_MODE_CONNECTOR_VGA; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) { sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; sdvo_priv->active_outputs = SDVO_DEVICE_CRT; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_DAC; connector_type = DRM_MODE_CONNECTOR_VGA; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) { sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; sdvo_priv->active_device = SDVO_DEVICE_TMDS; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_TMDS; connector_type = DRM_MODE_CONNECTOR_DVID; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) { sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; sdvo_priv->active_device = SDVO_DEVICE_TMDS; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_TMDS; connector_type = DRM_MODE_CONNECTOR_DVID; } else { unsigned char bytes[2]; memcpy(bytes, &sdvo_priv->caps.output_flags, 2); DRM_DEBUG ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", SDVO_NAME(sdvo_priv), bytes[0], bytes[1]); goto err_i2c; } drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs, encoder_type); drm_encoder_helper_add(&psb_intel_output->enc, &psb_intel_sdvo_helper_funcs); connector->connector_type = connector_type; drm_mode_connector_attach_encoder(&psb_intel_output->base, &psb_intel_output->enc); drm_sysfs_connector_add(connector); /* Set the input timing to the screen. Assume always input 0. 
*/ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output, &sdvo_priv->pixel_clock_min, &sdvo_priv-> pixel_clock_max); DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", SDVO_NAME(sdvo_priv), sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, sdvo_priv->caps.device_rev_id, sdvo_priv->pixel_clock_min / 1000, sdvo_priv->pixel_clock_max / 1000, (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', /* check currently supported outputs */ sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); psb_intel_output->ddc_bus = i2cbus; return; err_i2c: psb_intel_i2c_destroy(psb_intel_output->i2c_bus); err_connector: drm_connector_cleanup(connector); kfree(psb_intel_output); return; }
gpl-2.0
treznorx/TF201-9.4.2.7
drivers/staging/sm7xx/smtcfb.c
150
26877
/* * Silicon Motion SM7XX frame buffer device * * Copyright (C) 2006 Silicon Motion Technology Corp. * Authors: Ge Wang, gewang@siliconmotion.com * Boyod boyod.yang@siliconmotion.com.cn * * Copyright (C) 2009 Lemote, Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Version 0.10.26192.21.01 * - Add PowerPC/Big endian support * - Add 2D support for Lynx * - Verified on2.6.19.2 Boyod.yang <boyod.yang@siliconmotion.com.cn> * * Version 0.09.2621.00.01 * - Only support Linux Kernel's version 2.6.21. * Boyod.yang <boyod.yang@siliconmotion.com.cn> * * Version 0.09 * - Only support Linux Kernel's version 2.6.12. * Boyod.yang <boyod.yang@siliconmotion.com.cn> */ #include <linux/io.h> #include <linux/fb.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/console.h> #include <linux/screen_info.h> #ifdef CONFIG_PM #include <linux/pm.h> #endif struct screen_info smtc_screen_info; #include "smtcfb.h" #ifdef DEBUG #define smdbg(format, arg...) printk(KERN_DEBUG format , ## arg) #else #define smdbg(format, arg...) #endif /* * Private structure */ struct smtcfb_info { /* * The following is a pointer to be passed into the * functions below. The modules outside the main * voyager.c driver have no knowledge as to what * is within this structure. 
*/ struct fb_info fb; struct display_switch *dispsw; struct pci_dev *dev; signed int currcon; struct { u8 red, green, blue; } palette[NR_RGB]; u_int palette_size; }; struct par_info { /* * Hardware */ u16 chipID; unsigned char __iomem *m_pMMIO; char __iomem *m_pLFB; char *m_pDPR; char *m_pVPR; char *m_pCPR; u_int width; u_int height; u_int hz; u_long BaseAddressInVRAM; u8 chipRevID; }; struct vesa_mode_table { char mode_index[6]; u16 lfb_width; u16 lfb_height; u16 lfb_depth; }; static struct vesa_mode_table vesa_mode[] = { {"0x301", 640, 480, 8}, {"0x303", 800, 600, 8}, {"0x305", 1024, 768, 8}, {"0x307", 1280, 1024, 8}, {"0x311", 640, 480, 16}, {"0x314", 800, 600, 16}, {"0x317", 1024, 768, 16}, {"0x31A", 1280, 1024, 16}, {"0x312", 640, 480, 24}, {"0x315", 800, 600, 24}, {"0x318", 1024, 768, 24}, {"0x31B", 1280, 1024, 24}, }; char __iomem *smtc_RegBaseAddress; /* Memory Map IO starting address */ char __iomem *smtc_VRAMBaseAddress; /* video memory starting address */ static u32 colreg[17]; static struct par_info hw; /* hardware information */ u16 smtc_ChipIDs[] = { 0x710, 0x712, 0x720 }; #define numSMTCchipIDs (sizeof(smtc_ChipIDs) / sizeof(u16)) static void sm712_set_timing(struct smtcfb_info *sfb, struct par_info *ppar_info) { int i = 0, j = 0; u32 m_nScreenStride; smdbg("\nppar_info->width = %d ppar_info->height = %d" "sfb->fb.var.bits_per_pixel = %d ppar_info->hz = %d\n", ppar_info->width, ppar_info->height, sfb->fb.var.bits_per_pixel, ppar_info->hz); for (j = 0; j < numVGAModes; j++) { if (VGAMode[j].mmSizeX == ppar_info->width && VGAMode[j].mmSizeY == ppar_info->height && VGAMode[j].bpp == sfb->fb.var.bits_per_pixel && VGAMode[j].hz == ppar_info->hz) { smdbg("\nVGAMode[j].mmSizeX = %d VGAMode[j].mmSizeY =" "%d VGAMode[j].bpp = %d" "VGAMode[j].hz=%d\n", VGAMode[j].mmSizeX, VGAMode[j].mmSizeY, VGAMode[j].bpp, VGAMode[j].hz); smdbg("VGAMode index=%d\n", j); smtc_mmiowb(0x0, 0x3c6); smtc_seqw(0, 0x1); smtc_mmiowb(VGAMode[j].Init_MISC, 0x3c2); /* init SEQ register 
SR00 - SR04 */ for (i = 0; i < SIZE_SR00_SR04; i++) smtc_seqw(i, VGAMode[j].Init_SR00_SR04[i]); /* init SEQ register SR10 - SR24 */ for (i = 0; i < SIZE_SR10_SR24; i++) smtc_seqw(i + 0x10, VGAMode[j].Init_SR10_SR24[i]); /* init SEQ register SR30 - SR75 */ for (i = 0; i < SIZE_SR30_SR75; i++) if (((i + 0x30) != 0x62) \ && ((i + 0x30) != 0x6a) \ && ((i + 0x30) != 0x6b)) smtc_seqw(i + 0x30, VGAMode[j].Init_SR30_SR75[i]); /* init SEQ register SR80 - SR93 */ for (i = 0; i < SIZE_SR80_SR93; i++) smtc_seqw(i + 0x80, VGAMode[j].Init_SR80_SR93[i]); /* init SEQ register SRA0 - SRAF */ for (i = 0; i < SIZE_SRA0_SRAF; i++) smtc_seqw(i + 0xa0, VGAMode[j].Init_SRA0_SRAF[i]); /* init Graphic register GR00 - GR08 */ for (i = 0; i < SIZE_GR00_GR08; i++) smtc_grphw(i, VGAMode[j].Init_GR00_GR08[i]); /* init Attribute register AR00 - AR14 */ for (i = 0; i < SIZE_AR00_AR14; i++) smtc_attrw(i, VGAMode[j].Init_AR00_AR14[i]); /* init CRTC register CR00 - CR18 */ for (i = 0; i < SIZE_CR00_CR18; i++) smtc_crtcw(i, VGAMode[j].Init_CR00_CR18[i]); /* init CRTC register CR30 - CR4D */ for (i = 0; i < SIZE_CR30_CR4D; i++) smtc_crtcw(i + 0x30, VGAMode[j].Init_CR30_CR4D[i]); /* init CRTC register CR90 - CRA7 */ for (i = 0; i < SIZE_CR90_CRA7; i++) smtc_crtcw(i + 0x90, VGAMode[j].Init_CR90_CRA7[i]); } } smtc_mmiowb(0x67, 0x3c2); /* set VPR registers */ writel(0x0, ppar_info->m_pVPR + 0x0C); writel(0x0, ppar_info->m_pVPR + 0x40); /* set data width */ m_nScreenStride = (ppar_info->width * sfb->fb.var.bits_per_pixel) / 64; switch (sfb->fb.var.bits_per_pixel) { case 8: writel(0x0, ppar_info->m_pVPR + 0x0); break; case 16: writel(0x00020000, ppar_info->m_pVPR + 0x0); break; case 24: writel(0x00040000, ppar_info->m_pVPR + 0x0); break; case 32: writel(0x00030000, ppar_info->m_pVPR + 0x0); break; } writel((u32) (((m_nScreenStride + 2) << 16) | m_nScreenStride), ppar_info->m_pVPR + 0x10); } static void sm712_setpalette(int regno, unsigned red, unsigned green, unsigned blue, struct fb_info *info) { struct 
par_info *cur_par = (struct par_info *)info->par; if (cur_par->BaseAddressInVRAM) /* * second display palette for dual head. Enable CRT RAM, 6-bit * RAM */ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x20); else /* primary display palette. Enable LCD RAM only, 6-bit RAM */ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10); smtc_mmiowb(regno, dac_reg); smtc_mmiowb(red >> 10, dac_val); smtc_mmiowb(green >> 10, dac_val); smtc_mmiowb(blue >> 10, dac_val); } static void smtc_set_timing(struct smtcfb_info *sfb, struct par_info *ppar_info) { switch (ppar_info->chipID) { case 0x710: case 0x712: case 0x720: sm712_set_timing(sfb, ppar_info); break; } } static struct fb_var_screeninfo smtcfb_var = { .xres = 1024, .yres = 600, .xres_virtual = 1024, .yres_virtual = 600, .bits_per_pixel = 16, .red = {16, 8, 0}, .green = {8, 8, 0}, .blue = {0, 8, 0}, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo smtcfb_fix = { .id = "sm712fb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .line_length = 800 * 3, .accel = FB_ACCEL_SMI_LYNX, }; /* chan_to_field * * convert a colour value into a field position * * from pxafb.c */ static inline unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int cfb_blank(int blank_mode, struct fb_info *info) { /* clear DPMS setting */ switch (blank_mode) { case FB_BLANK_UNBLANK: /* Screen On: HSync: On, VSync : On */ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20))); smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77)); smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30))); smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03)); break; case FB_BLANK_NORMAL: /* Screen Off: HSync: On, VSync : On Soft blank */ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20))); smtc_seqw(0x6a, 0x16); 
smtc_seqw(0x6b, 0x02); smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30))); smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); break; case FB_BLANK_VSYNC_SUSPEND: /* Screen On: HSync: On, VSync : Off */ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x6a, 0x0c); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20)); smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20)); smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); break; case FB_BLANK_HSYNC_SUSPEND: /* Screen On: HSync: Off, VSync : On */ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x6a, 0x0c); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10)); smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); break; case FB_BLANK_POWERDOWN: /* Screen On: HSync: Off, VSync : Off */ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x6a, 0x0c); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30)); smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); break; default: return -EINVAL; } return 0; } static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned trans, struct fb_info *info) { struct smtcfb_info *sfb = (struct smtcfb_info *)info; u32 val; if (regno > 255) return 1; 
switch (sfb->fb.fix.visual) { case FB_VISUAL_DIRECTCOLOR: case FB_VISUAL_TRUECOLOR: /* * 16/32 bit true-colour, use pseuo-palette for 16 base color */ if (regno < 16) { if (sfb->fb.var.bits_per_pixel == 16) { u32 *pal = sfb->fb.pseudo_palette; val = chan_to_field(red, &sfb->fb.var.red); val |= chan_to_field(green, \ &sfb->fb.var.green); val |= chan_to_field(blue, &sfb->fb.var.blue); #ifdef __BIG_ENDIAN pal[regno] = ((red & 0xf800) >> 8) | ((green & 0xe000) >> 13) | ((green & 0x1c00) << 3) | ((blue & 0xf800) >> 3); #else pal[regno] = val; #endif } else { u32 *pal = sfb->fb.pseudo_palette; val = chan_to_field(red, &sfb->fb.var.red); val |= chan_to_field(green, \ &sfb->fb.var.green); val |= chan_to_field(blue, &sfb->fb.var.blue); #ifdef __BIG_ENDIAN val = (val & 0xff00ff00 >> 8) | (val & 0x00ff00ff << 8); #endif pal[regno] = val; } } break; case FB_VISUAL_PSEUDOCOLOR: /* color depth 8 bit */ sm712_setpalette(regno, red, green, blue, info); break; default: return 1; /* unknown type */ } return 0; } #ifdef __BIG_ENDIAN static ssize_t smtcfb_read(struct fb_info *info, char __user * buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; u32 *buffer, *dst; u32 __iomem *src; int c, i, cnt = 0, err = 0; unsigned long total_size; if (!info || !info->screen_base) return -ENODEV; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->screen_size; if (total_size == 0) total_size = info->fix.smem_len; if (p >= total_size) return 0; if (count >= total_size) count = total_size; if (count + p > total_size) count = total_size - p; buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); if (!buffer) return -ENOMEM; src = (u32 __iomem *) (info->screen_base + p); if (info->fbops->fb_sync) info->fbops->fb_sync(info); while (count) { c = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; dst = buffer; for (i = c >> 2; i--;) { *dst = fb_readl(src++); *dst = (*dst & 0xff00ff00 >> 8) | (*dst & 0x00ff00ff << 8); dst++; } if (c & 3) { u8 *dst8 = (u8 *) dst; u8 __iomem *src8 = (u8 __iomem *) src; for (i = c & 3; i--;) { if (i & 1) { *dst8++ = fb_readb(++src8); } else { *dst8++ = fb_readb(--src8); src8 += 2; } } src = (u32 __iomem *) src8; } if (copy_to_user(buf, buffer, c)) { err = -EFAULT; break; } *ppos += c; buf += c; cnt += c; count -= c; } kfree(buffer); return (err) ? err : cnt; } static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; u32 *buffer, *src; u32 __iomem *dst; int c, i, cnt = 0, err = 0; unsigned long total_size; if (!info || !info->screen_base) return -ENODEV; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->screen_size; if (total_size == 0) total_size = info->fix.smem_len; if (p > total_size) return -EFBIG; if (count > total_size) { err = -EFBIG; count = total_size; } if (count + p > total_size) { if (!err) err = -ENOSPC; count = total_size - p; } buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); if (!buffer) return -ENOMEM; dst = (u32 __iomem *) (info->screen_base + p); if (info->fbops->fb_sync) info->fbops->fb_sync(info); while (count) { c = (count > PAGE_SIZE) ? PAGE_SIZE : count; src = buffer; if (copy_from_user(src, buf, c)) { err = -EFAULT; break; } for (i = c >> 2; i--;) { fb_writel((*src & 0xff00ff00 >> 8) | (*src & 0x00ff00ff << 8), dst++); src++; } if (c & 3) { u8 *src8 = (u8 *) src; u8 __iomem *dst8 = (u8 __iomem *) dst; for (i = c & 3; i--;) { if (i & 1) { fb_writeb(*src8++, ++dst8); } else { fb_writeb(*src8++, --dst8); dst8 += 2; } } dst = (u32 __iomem *) dst8; } *ppos += c; buf += c; cnt += c; count -= c; } kfree(buffer); return (cnt) ? cnt : err; } #endif /* ! 
__BIG_ENDIAN */ static struct fb_ops smtcfb_ops = { .owner = THIS_MODULE, .fb_setcolreg = smtc_setcolreg, .fb_blank = cfb_blank, .fb_fillrect = cfb_fillrect, .fb_imageblit = cfb_imageblit, .fb_copyarea = cfb_copyarea, #ifdef __BIG_ENDIAN .fb_read = smtcfb_read, .fb_write = smtcfb_write, #endif }; void smtcfb_setmode(struct smtcfb_info *sfb) { switch (sfb->fb.var.bits_per_pixel) { case 32: sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres * 4; sfb->fb.var.red.length = 8; sfb->fb.var.green.length = 8; sfb->fb.var.blue.length = 8; sfb->fb.var.red.offset = 16; sfb->fb.var.green.offset = 8; sfb->fb.var.blue.offset = 0; break; case 8: sfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres; sfb->fb.var.red.offset = 5; sfb->fb.var.red.length = 3; sfb->fb.var.green.offset = 2; sfb->fb.var.green.length = 3; sfb->fb.var.blue.offset = 0; sfb->fb.var.blue.length = 2; break; case 24: sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres * 3; sfb->fb.var.red.length = 8; sfb->fb.var.green.length = 8; sfb->fb.var.blue.length = 8; sfb->fb.var.red.offset = 16; sfb->fb.var.green.offset = 8; sfb->fb.var.blue.offset = 0; break; case 16: default: sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres * 2; sfb->fb.var.red.length = 5; sfb->fb.var.green.length = 6; sfb->fb.var.blue.length = 5; sfb->fb.var.red.offset = 11; sfb->fb.var.green.offset = 5; sfb->fb.var.blue.offset = 0; break; } hw.width = sfb->fb.var.xres; hw.height = sfb->fb.var.yres; hw.hz = 60; smtc_set_timing(sfb, &hw); } /* * Alloc struct smtcfb_info and assign the default value */ static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *dev, char *name) { struct smtcfb_info *sfb; sfb = kzalloc(sizeof(struct smtcfb_info), GFP_KERNEL); if (!sfb) return NULL; sfb->currcon = -1; sfb->dev = dev; /*** Init sfb->fb with default value ***/ sfb->fb.flags = FBINFO_FLAG_DEFAULT; sfb->fb.fbops = 
&smtcfb_ops; sfb->fb.var = smtcfb_var; sfb->fb.fix = smtcfb_fix; strcpy(sfb->fb.fix.id, name); sfb->fb.fix.type = FB_TYPE_PACKED_PIXELS; sfb->fb.fix.type_aux = 0; sfb->fb.fix.xpanstep = 0; sfb->fb.fix.ypanstep = 0; sfb->fb.fix.ywrapstep = 0; sfb->fb.fix.accel = FB_ACCEL_SMI_LYNX; sfb->fb.var.nonstd = 0; sfb->fb.var.activate = FB_ACTIVATE_NOW; sfb->fb.var.height = -1; sfb->fb.var.width = -1; /* text mode acceleration */ sfb->fb.var.accel_flags = FB_ACCELF_TEXT; sfb->fb.var.vmode = FB_VMODE_NONINTERLACED; sfb->fb.par = &hw; sfb->fb.pseudo_palette = colreg; return sfb; } /* * Unmap in the memory mapped IO registers */ static void smtc_unmap_mmio(struct smtcfb_info *sfb) { if (sfb && smtc_RegBaseAddress) smtc_RegBaseAddress = NULL; } /* * Map in the screen memory */ static int smtc_map_smem(struct smtcfb_info *sfb, struct pci_dev *dev, u_long smem_len) { if (sfb->fb.var.bits_per_pixel == 32) { #ifdef __BIG_ENDIAN sfb->fb.fix.smem_start = pci_resource_start(dev, 0) + 0x800000; #else sfb->fb.fix.smem_start = pci_resource_start(dev, 0); #endif } else { sfb->fb.fix.smem_start = pci_resource_start(dev, 0); } sfb->fb.fix.smem_len = smem_len; sfb->fb.screen_base = smtc_VRAMBaseAddress; if (!sfb->fb.screen_base) { printk(KERN_INFO "%s: unable to map screen memory\n", sfb->fb.fix.id); return -ENOMEM; } return 0; } /* * Unmap in the screen memory * */ static void smtc_unmap_smem(struct smtcfb_info *sfb) { if (sfb && sfb->fb.screen_base) { iounmap(sfb->fb.screen_base); sfb->fb.screen_base = NULL; } } /* * We need to wake up the LynxEM+, and make sure its in linear memory mode. */ static inline void sm7xx_init_hw(void) { outb_p(0x18, 0x3c4); outb_p(0x11, 0x3c5); } static void smtc_free_fb_info(struct smtcfb_info *sfb) { if (sfb) { fb_alloc_cmap(&sfb->fb.cmap, 0, 0); kfree(sfb); } } /* * sm712vga_setup - process command line options, get vga parameter * @options: string of options * Returns zero. 
* */ static int __init __maybe_unused sm712vga_setup(char *options) { int index; if (!options || !*options) { smdbg("\n No vga parameter\n"); return -EINVAL; } smtc_screen_info.lfb_width = 0; smtc_screen_info.lfb_height = 0; smtc_screen_info.lfb_depth = 0; smdbg("\nsm712vga_setup = %s\n", options); for (index = 0; index < (sizeof(vesa_mode) / sizeof(struct vesa_mode_table)); index++) { if (strstr(options, vesa_mode[index].mode_index)) { smtc_screen_info.lfb_width = vesa_mode[index].lfb_width; smtc_screen_info.lfb_height = vesa_mode[index].lfb_height; smtc_screen_info.lfb_depth = vesa_mode[index].lfb_depth; return 0; } } return -1; } __setup("vga=", sm712vga_setup); /* Jason (08/13/2009) * Original init function changed to probe method to be used by pci_drv * process used to detect chips replaced with kernel process in pci_drv */ static int __devinit smtcfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct smtcfb_info *sfb; u_long smem_size = 0x00800000; /* default 8MB */ char name[16]; int err; unsigned long pFramebufferPhysical; printk(KERN_INFO "Silicon Motion display driver " SMTC_LINUX_FB_VERSION "\n"); err = pci_enable_device(pdev); /* enable SMTC chip */ if (err) return err; err = -ENOMEM; hw.chipID = ent->device; sprintf(name, "sm%Xfb", hw.chipID); sfb = smtc_alloc_fb_info(pdev, name); if (!sfb) goto failed_free; /* Jason (08/13/2009) * Store fb_info to be further used when suspending and resuming */ pci_set_drvdata(pdev, sfb); sm7xx_init_hw(); /*get mode parameter from smtc_screen_info */ if (smtc_screen_info.lfb_width != 0) { sfb->fb.var.xres = smtc_screen_info.lfb_width; sfb->fb.var.yres = smtc_screen_info.lfb_height; sfb->fb.var.bits_per_pixel = smtc_screen_info.lfb_depth; } else { /* default resolution 1024x600 16bit mode */ sfb->fb.var.xres = SCREEN_X_RES; sfb->fb.var.yres = SCREEN_Y_RES; sfb->fb.var.bits_per_pixel = SCREEN_BPP; } #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 24) sfb->fb.var.bits_per_pixel = 
(smtc_screen_info.lfb_depth = 32); #endif /* Map address and memory detection */ pFramebufferPhysical = pci_resource_start(pdev, 0); pci_read_config_byte(pdev, PCI_REVISION_ID, &hw.chipRevID); switch (hw.chipID) { case 0x710: case 0x712: sfb->fb.fix.mmio_start = pFramebufferPhysical + 0x00400000; sfb->fb.fix.mmio_len = 0x00400000; smem_size = SM712_VIDEOMEMORYSIZE; #ifdef __BIG_ENDIAN hw.m_pLFB = (smtc_VRAMBaseAddress = ioremap(pFramebufferPhysical, 0x00c00000)); #else hw.m_pLFB = (smtc_VRAMBaseAddress = ioremap(pFramebufferPhysical, 0x00800000)); #endif hw.m_pMMIO = (smtc_RegBaseAddress = smtc_VRAMBaseAddress + 0x00700000); hw.m_pDPR = smtc_VRAMBaseAddress + 0x00408000; hw.m_pVPR = hw.m_pLFB + 0x0040c000; #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) { smtc_VRAMBaseAddress += 0x800000; hw.m_pLFB += 0x800000; printk(KERN_INFO "\nsmtc_VRAMBaseAddress=%p hw.m_pLFB=%p\n", smtc_VRAMBaseAddress, hw.m_pLFB); } #endif if (!smtc_RegBaseAddress) { printk(KERN_INFO "%s: unable to map memory mapped IO\n", sfb->fb.fix.id); err = -ENOMEM; goto failed_fb; } /* set MCLK = 14.31818 * (0x16 / 0x2) */ smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x62, 0x3e); /* enable PCI burst */ smtc_seqw(0x17, 0x20); /* enable word swap */ #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) smtc_seqw(0x17, 0x30); #endif break; case 0x720: sfb->fb.fix.mmio_start = pFramebufferPhysical; sfb->fb.fix.mmio_len = 0x00200000; smem_size = SM722_VIDEOMEMORYSIZE; hw.m_pDPR = ioremap(pFramebufferPhysical, 0x00a00000); hw.m_pLFB = (smtc_VRAMBaseAddress = hw.m_pDPR + 0x00200000); hw.m_pMMIO = (smtc_RegBaseAddress = hw.m_pDPR + 0x000c0000); hw.m_pVPR = hw.m_pDPR + 0x800; smtc_seqw(0x62, 0xff); smtc_seqw(0x6a, 0x0d); smtc_seqw(0x6b, 0x02); break; default: printk(KERN_INFO "No valid Silicon Motion display chip was detected!\n"); goto failed_fb; } /* can support 32 bpp */ if (15 == sfb->fb.var.bits_per_pixel) sfb->fb.var.bits_per_pixel = 16; sfb->fb.var.xres_virtual = 
sfb->fb.var.xres; sfb->fb.var.yres_virtual = sfb->fb.var.yres; err = smtc_map_smem(sfb, pdev, smem_size); if (err) goto failed; smtcfb_setmode(sfb); /* Primary display starting from 0 position */ hw.BaseAddressInVRAM = 0; sfb->fb.par = &hw; err = register_framebuffer(&sfb->fb); if (err < 0) goto failed; printk(KERN_INFO "Silicon Motion SM%X Rev%X primary display mode" "%dx%d-%d Init Complete.\n", hw.chipID, hw.chipRevID, sfb->fb.var.xres, sfb->fb.var.yres, sfb->fb.var.bits_per_pixel); return 0; failed: printk(KERN_INFO "Silicon Motion, Inc. primary display init fail\n"); smtc_unmap_smem(sfb); smtc_unmap_mmio(sfb); failed_fb: smtc_free_fb_info(sfb); failed_free: pci_disable_device(pdev); return err; } /* Jason (08/11/2009) PCI_DRV wrapper essential structs */ static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = { {0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; /* Jason (08/14/2009) * do some clean up when the driver module is removed */ static void __devexit smtcfb_pci_remove(struct pci_dev *pdev) { struct smtcfb_info *sfb; sfb = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); smtc_unmap_smem(sfb); smtc_unmap_mmio(sfb); unregister_framebuffer(&sfb->fb); smtc_free_fb_info(sfb); } #ifdef CONFIG_PM /* Jason (08/14/2009) * suspend function, called when the suspend event is triggered */ static int __maybe_unused smtcfb_suspend(struct pci_dev *pdev, pm_message_t msg) { struct smtcfb_info *sfb; int retv; sfb = pci_get_drvdata(pdev); /* set the hw in sleep mode use externel clock and self memory refresh * so that we can turn off internal PLLs later on */ smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0)); smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7)); switch (msg.event) { case PM_EVENT_FREEZE: case PM_EVENT_PRETHAW: pdev->dev.power.power_state = msg; return 0; } /* when doing suspend, call fb apis and pci apis */ if (msg.event == PM_EVENT_SUSPEND) { console_lock(); 
fb_set_suspend(&sfb->fb, 1); console_unlock(); retv = pci_save_state(pdev); pci_disable_device(pdev); retv = pci_choose_state(pdev, msg); retv = pci_set_power_state(pdev, retv); } pdev->dev.power.power_state = msg; /* additionally turn off all function blocks including internal PLLs */ smtc_seqw(0x21, 0xff); return 0; } static int __maybe_unused smtcfb_resume(struct pci_dev *pdev) { struct smtcfb_info *sfb; int retv; sfb = pci_get_drvdata(pdev); /* when resuming, restore pci data and fb cursor */ if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) { retv = pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (pci_enable_device(pdev)) return -1; pci_set_master(pdev); } /* reinit hardware */ sm7xx_init_hw(); switch (hw.chipID) { case 0x710: case 0x712: /* set MCLK = 14.31818 * (0x16 / 0x2) */ smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x62, 0x3e); /* enable PCI burst */ smtc_seqw(0x17, 0x20); #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) smtc_seqw(0x17, 0x30); #endif break; case 0x720: smtc_seqw(0x62, 0xff); smtc_seqw(0x6a, 0x0d); smtc_seqw(0x6b, 0x02); break; } smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0)); smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb)); smtcfb_setmode(sfb); console_lock(); fb_set_suspend(&sfb->fb, 0); console_unlock(); return 0; } #endif /* Jason (08/13/2009) * pci_driver struct used to wrap the original driver * so that it can be registered into the kernel and * the proper method would be called when suspending and resuming */ static struct pci_driver smtcfb_driver = { .name = "smtcfb", .id_table = smtcfb_pci_table, .probe = smtcfb_pci_probe, .remove = __devexit_p(smtcfb_pci_remove), #ifdef CONFIG_PM .suspend = smtcfb_suspend, .resume = smtcfb_resume, #endif }; static int __init smtcfb_init(void) { return pci_register_driver(&smtcfb_driver); } static void __exit smtcfb_exit(void) { pci_unregister_driver(&smtcfb_driver); } module_init(smtcfb_init); module_exit(smtcfb_exit); MODULE_AUTHOR("Siliconmotion "); 
MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards"); MODULE_LICENSE("GPL");
gpl-2.0
rchicoli/linux
drivers/usb/phy/phy-mxs-usb.c
150
16066
/* * Copyright 2012-2014 Freescale Semiconductor, Inc. * Copyright (C) 2012 Marek Vasut <marex@denx.de> * on behalf of DENX Software Engineering GmbH * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/usb/otg.h> #include <linux/stmp_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of_device.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #define DRIVER_NAME "mxs_phy" #define HW_USBPHY_PWD 0x00 #define HW_USBPHY_CTRL 0x30 #define HW_USBPHY_CTRL_SET 0x34 #define HW_USBPHY_CTRL_CLR 0x38 #define HW_USBPHY_DEBUG_SET 0x54 #define HW_USBPHY_DEBUG_CLR 0x58 #define HW_USBPHY_IP 0x90 #define HW_USBPHY_IP_SET 0x94 #define HW_USBPHY_IP_CLR 0x98 #define BM_USBPHY_CTRL_SFTRST BIT(31) #define BM_USBPHY_CTRL_CLKGATE BIT(30) #define BM_USBPHY_CTRL_OTG_ID_VALUE BIT(27) #define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26) #define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25) #define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23) #define BM_USBPHY_CTRL_ENIDCHG_WKUP BIT(22) #define BM_USBPHY_CTRL_ENDPDMCHG_WKUP BIT(21) #define BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD BIT(20) #define BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE BIT(19) #define BM_USBPHY_CTRL_ENAUTO_PWRON_PLL BIT(18) #define BM_USBPHY_CTRL_ENUTMILEVEL3 BIT(15) #define BM_USBPHY_CTRL_ENUTMILEVEL2 BIT(14) #define BM_USBPHY_CTRL_ENHOSTDISCONDETECT BIT(1) #define BM_USBPHY_IP_FIX (BIT(17) | BIT(18)) #define BM_USBPHY_DEBUG_CLKGATE BIT(30) /* Anatop Registers */ #define ANADIG_ANA_MISC0 0x150 #define ANADIG_ANA_MISC0_SET 0x154 #define ANADIG_ANA_MISC0_CLR 0x158 #define ANADIG_USB1_VBUS_DET_STAT 0x1c0 #define ANADIG_USB2_VBUS_DET_STAT 0x220 #define 
ANADIG_USB1_LOOPBACK_SET 0x1e4 #define ANADIG_USB1_LOOPBACK_CLR 0x1e8 #define ANADIG_USB2_LOOPBACK_SET 0x244 #define ANADIG_USB2_LOOPBACK_CLR 0x248 #define ANADIG_USB1_MISC 0x1f0 #define ANADIG_USB2_MISC 0x250 #define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12) #define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11) #define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3) #define BM_ANADIG_USB2_VBUS_DET_STAT_VBUS_VALID BIT(3) #define BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 BIT(2) #define BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN BIT(5) #define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2) #define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5) #define BM_ANADIG_USB1_MISC_RX_VPIN_FS BIT(29) #define BM_ANADIG_USB1_MISC_RX_VMIN_FS BIT(28) #define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29) #define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28) #define to_mxs_phy(p) container_of((p), struct mxs_phy, phy) /* Do disconnection between PHY and controller without vbus */ #define MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS BIT(0) /* * The PHY will be in messy if there is a wakeup after putting * bus to suspend (set portsc.suspendM) but before setting PHY to low * power mode (set portsc.phcd). */ #define MXS_PHY_ABNORMAL_IN_SUSPEND BIT(1) /* * The SOF sends too fast after resuming, it will cause disconnection * between host and high speed device. */ #define MXS_PHY_SENDING_SOF_TOO_FAST BIT(2) /* * IC has bug fixes logic, they include * MXS_PHY_ABNORMAL_IN_SUSPEND and MXS_PHY_SENDING_SOF_TOO_FAST * which are described at above flags, the RTL will handle it * according to different versions. 
*/ #define MXS_PHY_NEED_IP_FIX BIT(3) struct mxs_phy_data { unsigned int flags; }; static const struct mxs_phy_data imx23_phy_data = { .flags = MXS_PHY_ABNORMAL_IN_SUSPEND | MXS_PHY_SENDING_SOF_TOO_FAST, }; static const struct mxs_phy_data imx6q_phy_data = { .flags = MXS_PHY_SENDING_SOF_TOO_FAST | MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | MXS_PHY_NEED_IP_FIX, }; static const struct mxs_phy_data imx6sl_phy_data = { .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | MXS_PHY_NEED_IP_FIX, }; static const struct mxs_phy_data vf610_phy_data = { .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | MXS_PHY_NEED_IP_FIX, }; static const struct mxs_phy_data imx6sx_phy_data = { .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS, }; static const struct mxs_phy_data imx6ul_phy_data = { .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS, }; static const struct of_device_id mxs_phy_dt_ids[] = { { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, }, { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, }, { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, }, { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, }, { .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, }, { .compatible = "fsl,imx6ul-usbphy", .data = &imx6ul_phy_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids); struct mxs_phy { struct usb_phy phy; struct clk *clk; const struct mxs_phy_data *data; struct regmap *regmap_anatop; int port_id; }; static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy) { return mxs_phy->data == &imx6q_phy_data; } static inline bool is_imx6sl_phy(struct mxs_phy *mxs_phy) { return mxs_phy->data == &imx6sl_phy_data; } /* * PHY needs some 32K cycles to switch from 32K clock to * bus (such as AHB/AXI, etc) clock. 
*/ static void mxs_phy_clock_switch_delay(void) { usleep_range(300, 400); } static int mxs_phy_hw_init(struct mxs_phy *mxs_phy) { int ret; void __iomem *base = mxs_phy->phy.io_priv; ret = stmp_reset_block(base + HW_USBPHY_CTRL); if (ret) return ret; /* Power up the PHY */ writel(0, base + HW_USBPHY_PWD); /* * USB PHY Ctrl Setting * - Auto clock/power on * - Enable full/low speed support */ writel(BM_USBPHY_CTRL_ENAUTOSET_USBCLKS | BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE | BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD | BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE | BM_USBPHY_CTRL_ENAUTO_PWRON_PLL | BM_USBPHY_CTRL_ENUTMILEVEL2 | BM_USBPHY_CTRL_ENUTMILEVEL3, base + HW_USBPHY_CTRL_SET); if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX) writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET); return 0; } /* Return true if the vbus is there */ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy) { unsigned int vbus_value = 0; if (!mxs_phy->regmap_anatop) return false; if (mxs_phy->port_id == 0) regmap_read(mxs_phy->regmap_anatop, ANADIG_USB1_VBUS_DET_STAT, &vbus_value); else if (mxs_phy->port_id == 1) regmap_read(mxs_phy->regmap_anatop, ANADIG_USB2_VBUS_DET_STAT, &vbus_value); if (vbus_value & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID) return true; else return false; } static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect) { void __iomem *base = mxs_phy->phy.io_priv; u32 reg; if (disconnect) writel_relaxed(BM_USBPHY_DEBUG_CLKGATE, base + HW_USBPHY_DEBUG_CLR); if (mxs_phy->port_id == 0) { reg = disconnect ? ANADIG_USB1_LOOPBACK_SET : ANADIG_USB1_LOOPBACK_CLR; regmap_write(mxs_phy->regmap_anatop, reg, BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 | BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN); } else if (mxs_phy->port_id == 1) { reg = disconnect ? 
ANADIG_USB2_LOOPBACK_SET : ANADIG_USB2_LOOPBACK_CLR; regmap_write(mxs_phy->regmap_anatop, reg, BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 | BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN); } if (!disconnect) writel_relaxed(BM_USBPHY_DEBUG_CLKGATE, base + HW_USBPHY_DEBUG_SET); /* Delay some time, and let Linestate be SE0 for controller */ if (disconnect) usleep_range(500, 1000); } static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy) { void __iomem *base = mxs_phy->phy.io_priv; u32 phyctrl = readl(base + HW_USBPHY_CTRL); if (IS_ENABLED(CONFIG_USB_OTG) && !(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE)) return true; return false; } static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on) { bool vbus_is_on = false; /* If the SoCs don't need to disconnect line without vbus, quit */ if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS)) return; /* If the SoCs don't have anatop, quit */ if (!mxs_phy->regmap_anatop) return; vbus_is_on = mxs_phy_get_vbus_status(mxs_phy); if (on && !vbus_is_on && !mxs_phy_is_otg_host(mxs_phy)) __mxs_phy_disconnect_line(mxs_phy, true); else __mxs_phy_disconnect_line(mxs_phy, false); } static int mxs_phy_init(struct usb_phy *phy) { int ret; struct mxs_phy *mxs_phy = to_mxs_phy(phy); mxs_phy_clock_switch_delay(); ret = clk_prepare_enable(mxs_phy->clk); if (ret) return ret; return mxs_phy_hw_init(mxs_phy); } static void mxs_phy_shutdown(struct usb_phy *phy) { struct mxs_phy *mxs_phy = to_mxs_phy(phy); u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP | BM_USBPHY_CTRL_ENDPDMCHG_WKUP | BM_USBPHY_CTRL_ENIDCHG_WKUP | BM_USBPHY_CTRL_ENAUTOSET_USBCLKS | BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE | BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD | BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE | BM_USBPHY_CTRL_ENAUTO_PWRON_PLL; writel(value, phy->io_priv + HW_USBPHY_CTRL_CLR); writel(0xffffffff, phy->io_priv + HW_USBPHY_PWD); writel(BM_USBPHY_CTRL_CLKGATE, phy->io_priv + HW_USBPHY_CTRL_SET); clk_disable_unprepare(mxs_phy->clk); } static bool mxs_phy_is_low_speed_connection(struct mxs_phy 
*mxs_phy) { unsigned int line_state; /* bit definition is the same for all controllers */ unsigned int dp_bit = BM_ANADIG_USB1_MISC_RX_VPIN_FS, dm_bit = BM_ANADIG_USB1_MISC_RX_VMIN_FS; unsigned int reg = ANADIG_USB1_MISC; /* If the SoCs don't have anatop, quit */ if (!mxs_phy->regmap_anatop) return false; if (mxs_phy->port_id == 0) reg = ANADIG_USB1_MISC; else if (mxs_phy->port_id == 1) reg = ANADIG_USB2_MISC; regmap_read(mxs_phy->regmap_anatop, reg, &line_state); if ((line_state & (dp_bit | dm_bit)) == dm_bit) return true; else return false; } static int mxs_phy_suspend(struct usb_phy *x, int suspend) { int ret; struct mxs_phy *mxs_phy = to_mxs_phy(x); bool low_speed_connection, vbus_is_on; low_speed_connection = mxs_phy_is_low_speed_connection(mxs_phy); vbus_is_on = mxs_phy_get_vbus_status(mxs_phy); if (suspend) { /* * FIXME: Do not power down RXPWD1PT1 bit for low speed * connect. The low speed connection will have problem at * very rare cases during usb suspend and resume process. */ if (low_speed_connection & vbus_is_on) { /* * If value to be set as pwd value is not 0xffffffff, * several 32Khz cycles are needed. 
*/ mxs_phy_clock_switch_delay(); writel(0xffbfffff, x->io_priv + HW_USBPHY_PWD); } else { writel(0xffffffff, x->io_priv + HW_USBPHY_PWD); } writel(BM_USBPHY_CTRL_CLKGATE, x->io_priv + HW_USBPHY_CTRL_SET); clk_disable_unprepare(mxs_phy->clk); } else { mxs_phy_clock_switch_delay(); ret = clk_prepare_enable(mxs_phy->clk); if (ret) return ret; writel(BM_USBPHY_CTRL_CLKGATE, x->io_priv + HW_USBPHY_CTRL_CLR); writel(0, x->io_priv + HW_USBPHY_PWD); } return 0; } static int mxs_phy_set_wakeup(struct usb_phy *x, bool enabled) { struct mxs_phy *mxs_phy = to_mxs_phy(x); u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP | BM_USBPHY_CTRL_ENDPDMCHG_WKUP | BM_USBPHY_CTRL_ENIDCHG_WKUP; if (enabled) { mxs_phy_disconnect_line(mxs_phy, true); writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_SET); } else { writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_CLR); mxs_phy_disconnect_line(mxs_phy, false); } return 0; } static int mxs_phy_on_connect(struct usb_phy *phy, enum usb_device_speed speed) { dev_dbg(phy->dev, "%s device has connected\n", (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS"); if (speed == USB_SPEED_HIGH) writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT, phy->io_priv + HW_USBPHY_CTRL_SET); return 0; } static int mxs_phy_on_disconnect(struct usb_phy *phy, enum usb_device_speed speed) { dev_dbg(phy->dev, "%s device has disconnected\n", (speed == USB_SPEED_HIGH) ? 
"HS" : "FS/LS"); /* Sometimes, the speed is not high speed when the error occurs */ if (readl(phy->io_priv + HW_USBPHY_CTRL) & BM_USBPHY_CTRL_ENHOSTDISCONDETECT) writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT, phy->io_priv + HW_USBPHY_CTRL_CLR); return 0; } static int mxs_phy_probe(struct platform_device *pdev) { struct resource *res; void __iomem *base; struct clk *clk; struct mxs_phy *mxs_phy; int ret; const struct of_device_id *of_id; struct device_node *np = pdev->dev.of_node; of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev); if (!of_id) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "can't get the clock, err=%ld", PTR_ERR(clk)); return PTR_ERR(clk); } mxs_phy = devm_kzalloc(&pdev->dev, sizeof(*mxs_phy), GFP_KERNEL); if (!mxs_phy) return -ENOMEM; /* Some SoCs don't have anatop registers */ if (of_get_property(np, "fsl,anatop", NULL)) { mxs_phy->regmap_anatop = syscon_regmap_lookup_by_phandle (np, "fsl,anatop"); if (IS_ERR(mxs_phy->regmap_anatop)) { dev_dbg(&pdev->dev, "failed to find regmap for anatop\n"); return PTR_ERR(mxs_phy->regmap_anatop); } } ret = of_alias_get_id(np, "usbphy"); if (ret < 0) dev_dbg(&pdev->dev, "failed to get alias id, errno %d\n", ret); mxs_phy->port_id = ret; mxs_phy->phy.io_priv = base; mxs_phy->phy.dev = &pdev->dev; mxs_phy->phy.label = DRIVER_NAME; mxs_phy->phy.init = mxs_phy_init; mxs_phy->phy.shutdown = mxs_phy_shutdown; mxs_phy->phy.set_suspend = mxs_phy_suspend; mxs_phy->phy.notify_connect = mxs_phy_on_connect; mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect; mxs_phy->phy.type = USB_PHY_TYPE_USB2; mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup; mxs_phy->clk = clk; mxs_phy->data = of_id->data; platform_set_drvdata(pdev, mxs_phy); device_set_wakeup_capable(&pdev->dev, true); return usb_add_phy_dev(&mxs_phy->phy); } static int 
mxs_phy_remove(struct platform_device *pdev)
{
	struct mxs_phy *mxs_phy = platform_get_drvdata(pdev);

	/* Unregister from the USB PHY framework; clock was handled in shutdown */
	usb_remove_phy(&mxs_phy->phy);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * Keep (or stop keeping) the analog LDO powered across system suspend so
 * the PHY can generate wakeup events.  Which anatop bit to poke depends on
 * the SoC (imx6q vs imx6sl); SoCs without anatop are a no-op.
 */
static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
{
	unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;

	/* If the SoCs don't have anatop, quit */
	if (!mxs_phy->regmap_anatop)
		return;

	if (is_imx6q_phy(mxs_phy))
		regmap_write(mxs_phy->regmap_anatop, reg,
			BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
	else if (is_imx6sl_phy(mxs_phy))
		regmap_write(mxs_phy->regmap_anatop,
			reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
}

/* System sleep: keep the LDO on only when the device may wake the system */
static int mxs_phy_system_suspend(struct device *dev)
{
	struct mxs_phy *mxs_phy = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		mxs_phy_enable_ldo_in_suspend(mxs_phy, true);

	return 0;
}

/* System resume: undo the suspend-time LDO arrangement */
static int mxs_phy_system_resume(struct device *dev)
{
	struct mxs_phy *mxs_phy = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		mxs_phy_enable_ldo_in_suspend(mxs_phy, false);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(mxs_phy_pm, mxs_phy_system_suspend,
		mxs_phy_system_resume);

static struct platform_driver mxs_phy_driver = {
	.probe = mxs_phy_probe,
	.remove = mxs_phy_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mxs_phy_dt_ids,
		.pm = &mxs_phy_pm,
	},
};

static int __init mxs_phy_module_init(void)
{
	return platform_driver_register(&mxs_phy_driver);
}
/* postcore: the PHY must be available before the USB controller probes */
postcore_initcall(mxs_phy_module_init);

static void __exit mxs_phy_module_exit(void)
{
	platform_driver_unregister(&mxs_phy_driver);
}
module_exit(mxs_phy_module_exit);

MODULE_ALIAS("platform:mxs-usb-phy");
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
MODULE_DESCRIPTION("Freescale MXS USB PHY driver");
MODULE_LICENSE("GPL");
gpl-2.0
balika011/android_kernel_lenovo_spark
fs/gfs2/ops_fstype.c
406
36601
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/export.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/gfs2_ondisk.h> #include <linux/quotaops.h> #include <linux/lockdep.h> #include <linux/module.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "glops.h" #include "inode.h" #include "recovery.h" #include "rgrp.h" #include "super.h" #include "sys.h" #include "util.h" #include "log.h" #include "quota.h" #include "dir.h" #include "trace_gfs2.h" #define DO 0 #define UNDO 1 /** * gfs2_tune_init - Fill a gfs2_tune structure with default values * @gt: tune * */ static void gfs2_tune_init(struct gfs2_tune *gt) { spin_lock_init(&gt->gt_spin); gt->gt_quota_simul_sync = 64; gt->gt_quota_warn_period = 10; gt->gt_quota_scale_num = 1; gt->gt_quota_scale_den = 1; gt->gt_new_files_jdata = 0; gt->gt_max_readahead = 1 << 18; gt->gt_complain_secs = 10; } static struct gfs2_sbd *init_sbd(struct super_block *sb) { struct gfs2_sbd *sdp; sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL); if (!sdp) return NULL; sb->s_fs_info = sdp; sdp->sd_vfs = sb; sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); if (!sdp->sd_lkstats) { kfree(sdp); return NULL; } set_bit(SDF_NOJOURNALID, &sdp->sd_flags); gfs2_tune_init(&sdp->sd_tune); init_waitqueue_head(&sdp->sd_glock_wait); atomic_set(&sdp->sd_glock_disposal, 0); init_completion(&sdp->sd_locking_init); init_completion(&sdp->sd_wdack); spin_lock_init(&sdp->sd_statfs_spin); 
spin_lock_init(&sdp->sd_rindex_spin); sdp->sd_rindex_tree.rb_node = NULL; INIT_LIST_HEAD(&sdp->sd_jindex_list); spin_lock_init(&sdp->sd_jindex_spin); mutex_init(&sdp->sd_jindex_mutex); INIT_LIST_HEAD(&sdp->sd_quota_list); mutex_init(&sdp->sd_quota_mutex); init_waitqueue_head(&sdp->sd_quota_wait); INIT_LIST_HEAD(&sdp->sd_trunc_list); spin_lock_init(&sdp->sd_trunc_lock); spin_lock_init(&sdp->sd_log_lock); atomic_set(&sdp->sd_log_pinned, 0); INIT_LIST_HEAD(&sdp->sd_log_le_buf); INIT_LIST_HEAD(&sdp->sd_log_le_revoke); INIT_LIST_HEAD(&sdp->sd_log_le_databuf); INIT_LIST_HEAD(&sdp->sd_log_le_ordered); spin_lock_init(&sdp->sd_ordered_lock); init_waitqueue_head(&sdp->sd_log_waitq); init_waitqueue_head(&sdp->sd_logd_waitq); spin_lock_init(&sdp->sd_ail_lock); INIT_LIST_HEAD(&sdp->sd_ail1_list); INIT_LIST_HEAD(&sdp->sd_ail2_list); init_rwsem(&sdp->sd_log_flush_lock); atomic_set(&sdp->sd_log_in_flight, 0); init_waitqueue_head(&sdp->sd_log_flush_wait); INIT_LIST_HEAD(&sdp->sd_revoke_list); return sdp; } /** * gfs2_check_sb - Check superblock * @sdp: the filesystem * @sb: The superblock * @silent: Don't print a message if the check fails * * Checks the version code of the FS is one that we understand how to * read and that the sizes of the various on-disk structures have not * changed. */ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) { struct gfs2_sb_host *sb = &sdp->sd_sb; if (sb->sb_magic != GFS2_MAGIC || sb->sb_type != GFS2_METATYPE_SB) { if (!silent) printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n"); return -EINVAL; } /* If format numbers match exactly, we're done. 
*/ if (sb->sb_fs_format == GFS2_FORMAT_FS && sb->sb_multihost_format == GFS2_FORMAT_MULTI) return 0; fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); return -EINVAL; } static void end_bio_io_page(struct bio *bio, int error) { struct page *page = bio->bi_private; if (!error) SetPageUptodate(page); else printk(KERN_WARNING "gfs2: error %d reading superblock\n", error); unlock_page(page); } static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf) { struct gfs2_sb_host *sb = &sdp->sd_sb; struct super_block *s = sdp->sd_vfs; const struct gfs2_sb *str = buf; sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic); sb->sb_type = be32_to_cpu(str->sb_header.mh_type); sb->sb_format = be32_to_cpu(str->sb_header.mh_format); sb->sb_fs_format = be32_to_cpu(str->sb_fs_format); sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format); sb->sb_bsize = be32_to_cpu(str->sb_bsize); sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift); sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr); sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino); sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr); sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino); memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN); memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); memcpy(s->s_uuid, str->sb_uuid, 16); } /** * gfs2_read_super - Read the gfs2 super block from disk * @sdp: The GFS2 super block * @sector: The location of the super block * @error: The error code to return * * This uses the bio functions to read the super block from disk * because we want to be 100% sure that we never read cached data. * A super block is read twice only during each GFS2 mount and is * never written to by the filesystem. The first time its read no * locks are held, and the only details which are looked at are those * relating to the locking protocol. 
Once locking is up and working, * the sb is read again under the lock to establish the location of * the master directory (contains pointers to journals etc) and the * root directory. * * Returns: 0 on success or error */ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) { struct super_block *sb = sdp->sd_vfs; struct gfs2_sb *p; struct page *page; struct bio *bio; page = alloc_page(GFP_NOFS); if (unlikely(!page)) return -ENOBUFS; ClearPageUptodate(page); ClearPageDirty(page); lock_page(page); bio = bio_alloc(GFP_NOFS, 1); bio->bi_sector = sector * (sb->s_blocksize >> 9); bio->bi_bdev = sb->s_bdev; bio_add_page(bio, page, PAGE_SIZE, 0); bio->bi_end_io = end_bio_io_page; bio->bi_private = page; submit_bio(READ_SYNC | REQ_META, bio); wait_on_page_locked(page); bio_put(bio); if (!PageUptodate(page)) { __free_page(page); return -EIO; } p = kmap(page); gfs2_sb_in(sdp, p); kunmap(page); __free_page(page); return gfs2_check_sb(sdp, silent); } /** * gfs2_read_sb - Read super block * @sdp: The GFS2 superblock * @silent: Don't print message if mount fails * */ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent) { u32 hash_blocks, ind_blocks, leaf_blocks; u32 tmp_blocks; unsigned int x; int error; error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent); if (error) { if (!silent) fs_err(sdp, "can't read superblock\n"); return error; } sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT; sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_diptrs = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) / sizeof(u64); sdp->sd_inptrs = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header); sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2; sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1; sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64); sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / 
sizeof(struct gfs2_quota_change); sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */ /* Compute maximum reservation required to add a entry to a directory */ hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH), sdp->sd_jbsize); ind_blocks = 0; for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) { tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs); ind_blocks += tmp_blocks; } leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH; sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks; sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs; for (x = 2;; x++) { u64 space, d; u32 m; space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs; d = space; m = do_div(d, sdp->sd_inptrs); if (d != sdp->sd_heightsize[x - 1] || m) break; sdp->sd_heightsize[x] = space; } sdp->sd_max_height = x; sdp->sd_heightsize[x] = ~0; gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT); sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs; for (x = 2;; x++) { u64 space, d; u32 m; space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs; d = space; m = do_div(d, sdp->sd_inptrs); if (d != sdp->sd_jheightsize[x - 1] || m) break; sdp->sd_jheightsize[x] = space; } sdp->sd_max_jheight = x; sdp->sd_jheightsize[x] = ~0; gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT); return 0; } static int init_names(struct gfs2_sbd *sdp, int silent) { char *proto, *table; int error = 0; proto = sdp->sd_args.ar_lockproto; table = sdp->sd_args.ar_locktable; /* Try to autodetect */ if (!proto[0] || !table[0]) { error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent); if (error) return error; if (!proto[0]) proto = sdp->sd_sb.sb_lockproto; if (!table[0]) table = sdp->sd_sb.sb_locktable; } if (!table[0]) table = 
sdp->sd_vfs->s_id; strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN); strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN); table = sdp->sd_table_name; while ((table = strchr(table, '/'))) *table = '_'; return error; } static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, int undo) { int error = 0; if (undo) goto fail_trans; error = gfs2_glock_nq_num(sdp, GFS2_MOUNT_LOCK, &gfs2_nondisk_glops, LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE, mount_gh); if (error) { fs_err(sdp, "can't acquire mount glock: %d\n", error); goto fail; } error = gfs2_glock_nq_num(sdp, GFS2_LIVE_LOCK, &gfs2_nondisk_glops, LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT, &sdp->sd_live_gh); if (error) { fs_err(sdp, "can't acquire live glock: %d\n", error); goto fail_mount; } error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops, CREATE, &sdp->sd_rename_gl); if (error) { fs_err(sdp, "can't create rename glock: %d\n", error); goto fail_live; } error = gfs2_glock_get(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops, CREATE, &sdp->sd_trans_gl); if (error) { fs_err(sdp, "can't create transaction glock: %d\n", error); goto fail_rename; } return 0; fail_trans: gfs2_glock_put(sdp->sd_trans_gl); fail_rename: gfs2_glock_put(sdp->sd_rename_gl); fail_live: gfs2_glock_dq_uninit(&sdp->sd_live_gh); fail_mount: gfs2_glock_dq_uninit(mount_gh); fail: return error; } static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr, u64 no_addr, const char *name) { struct gfs2_sbd *sdp = sb->s_fs_info; struct dentry *dentry; struct inode *inode; inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0); if (IS_ERR(inode)) { fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); return PTR_ERR(inode); } dentry = d_make_root(inode); if (!dentry) { fs_err(sdp, "can't alloc %s dentry\n", name); return -ENOMEM; } *dptr = dentry; return 0; } static int init_sb(struct gfs2_sbd *sdp, int silent) { struct super_block *sb = sdp->sd_vfs; struct gfs2_holder sb_gh; u64 no_addr; int ret; ret = 
gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops, LM_ST_SHARED, 0, &sb_gh); if (ret) { fs_err(sdp, "can't acquire superblock glock: %d\n", ret); return ret; } ret = gfs2_read_sb(sdp, silent); if (ret) { fs_err(sdp, "can't read superblock: %d\n", ret); goto out; } /* Set up the buffer cache and SB for real */ if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) { ret = -EINVAL; fs_err(sdp, "FS block size (%u) is too small for device " "block size (%u)\n", sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev)); goto out; } if (sdp->sd_sb.sb_bsize > PAGE_SIZE) { ret = -EINVAL; fs_err(sdp, "FS block size (%u) is too big for machine " "page size (%u)\n", sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE); goto out; } sb_set_blocksize(sb, sdp->sd_sb.sb_bsize); /* Get the root inode */ no_addr = sdp->sd_sb.sb_root_dir.no_addr; ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root"); if (ret) goto out; /* Get the master inode */ no_addr = sdp->sd_sb.sb_master_dir.no_addr; ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master"); if (ret) { dput(sdp->sd_root_dir); goto out; } sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir); out: gfs2_glock_dq_uninit(&sb_gh); return ret; } /** * map_journal_extents - create a reusable "extent" mapping from all logical * blocks to all physical blocks for the given journal. This will save * us time when writing journal blocks. Most journals will have only one * extent that maps all their logical blocks. That's because gfs2.mkfs * arranges the journal blocks sequentially to maximize performance. * So the extent would map the first block for the entire file length. * However, gfs2_jadd can happen while file activity is happening, so * those journals may not be sequential. Less likely is the case where * the users created their own journals by mounting the metafs and * laying it out. But it's still possible. These journals might have * several extents. 
* * TODO: This should be done in bigger chunks rather than one block at a time, * but since it's only done at mount time, I'm not worried about the * time it takes. */ static int map_journal_extents(struct gfs2_sbd *sdp) { struct gfs2_jdesc *jd = sdp->sd_jdesc; unsigned int lb; u64 db, prev_db; /* logical block, disk block, prev disk block */ struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_journal_extent *jext = NULL; struct buffer_head bh; int rc = 0; prev_db = 0; for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) { bh.b_state = 0; bh.b_blocknr = 0; bh.b_size = 1 << ip->i_inode.i_blkbits; rc = gfs2_block_map(jd->jd_inode, lb, &bh, 0); db = bh.b_blocknr; if (rc || !db) { printk(KERN_INFO "GFS2 journal mapping error %d: lb=" "%u db=%llu\n", rc, lb, (unsigned long long)db); break; } if (!prev_db || db != prev_db + 1) { jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_KERNEL); if (!jext) { printk(KERN_INFO "GFS2 error: out of memory " "mapping journal extents.\n"); rc = -ENOMEM; break; } jext->dblock = db; jext->lblock = lb; jext->blocks = 1; list_add_tail(&jext->extent_list, &jd->extent_list); } else { jext->blocks++; } prev_db = db; } return rc; } static void gfs2_others_may_mount(struct gfs2_sbd *sdp) { char *message = "FIRSTMOUNT=Done"; char *envp[] = { message, NULL }; fs_info(sdp, "first mount done, others may mount\n"); if (sdp->sd_lockstruct.ls_ops->lm_first_done) sdp->sd_lockstruct.ls_ops->lm_first_done(sdp); kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp); } /** * gfs2_jindex_hold - Grab a lock on the jindex * @sdp: The GFS2 superblock * @ji_gh: the holder for the jindex glock * * Returns: errno */ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) { struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex); struct qstr name; char buf[20]; struct gfs2_jdesc *jd; int error; name.name = buf; mutex_lock(&sdp->sd_jindex_mutex); for (;;) { error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, 
ji_gh); if (error) break; name.len = sprintf(buf, "journal%u", sdp->sd_journals); name.hash = gfs2_disk_hash(name.name, name.len); error = gfs2_dir_check(sdp->sd_jindex, &name, NULL); if (error == -ENOENT) { error = 0; break; } gfs2_glock_dq_uninit(ji_gh); if (error) break; error = -ENOMEM; jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL); if (!jd) break; INIT_LIST_HEAD(&jd->extent_list); INIT_WORK(&jd->jd_work, gfs2_recover_func); jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { if (!jd->jd_inode) error = -ENOENT; else error = PTR_ERR(jd->jd_inode); kfree(jd); break; } spin_lock(&sdp->sd_jindex_spin); jd->jd_jid = sdp->sd_journals++; list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); spin_unlock(&sdp->sd_jindex_spin); } mutex_unlock(&sdp->sd_jindex_mutex); return error; } static int init_journal(struct gfs2_sbd *sdp, int undo) { struct inode *master = sdp->sd_master_dir->d_inode; struct gfs2_holder ji_gh; struct gfs2_inode *ip; int jindex = 1; int error = 0; if (undo) { jindex = 0; goto fail_jinode_gh; } sdp->sd_jindex = gfs2_lookup_simple(master, "jindex"); if (IS_ERR(sdp->sd_jindex)) { fs_err(sdp, "can't lookup journal index: %d\n", error); return PTR_ERR(sdp->sd_jindex); } /* Load in the journal index special file */ error = gfs2_jindex_hold(sdp, &ji_gh); if (error) { fs_err(sdp, "can't read journal index: %d\n", error); goto fail; } error = -EUSERS; if (!gfs2_jindex_size(sdp)) { fs_err(sdp, "no journals!\n"); goto fail_jindex; } if (sdp->sd_args.ar_spectator) { sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0); atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5); atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5); } else { if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) { fs_err(sdp, "can't mount journal #%u\n", sdp->sd_lockstruct.ls_jid); fs_err(sdp, "there are only %u journals (0 - %u)\n", gfs2_jindex_size(sdp), 
gfs2_jindex_size(sdp) - 1); goto fail_jindex; } sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid); error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid, &gfs2_journal_glops, LM_ST_EXCLUSIVE, LM_FLAG_NOEXP, &sdp->sd_journal_gh); if (error) { fs_err(sdp, "can't acquire journal glock: %d\n", error); goto fail_jindex; } ip = GFS2_I(sdp->sd_jdesc->jd_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE, &sdp->sd_jinode_gh); if (error) { fs_err(sdp, "can't acquire journal inode glock: %d\n", error); goto fail_journal_gh; } error = gfs2_jdesc_check(sdp->sd_jdesc); if (error) { fs_err(sdp, "my journal (%u) is bad: %d\n", sdp->sd_jdesc->jd_jid, error); goto fail_jinode_gh; } atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5); atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5); /* Map the extents for this journal's blocks */ map_journal_extents(sdp); } trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free)); if (sdp->sd_lockstruct.ls_first) { unsigned int x; for (x = 0; x < sdp->sd_journals; x++) { error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x), true); if (error) { fs_err(sdp, "error recovering journal %u: %d\n", x, error); goto fail_jinode_gh; } } gfs2_others_may_mount(sdp); } else if (!sdp->sd_args.ar_spectator) { error = gfs2_recover_journal(sdp->sd_jdesc, true); if (error) { fs_err(sdp, "error recovering my journal: %d\n", error); goto fail_jinode_gh; } } set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags); gfs2_glock_dq_uninit(&ji_gh); jindex = 0; return 0; fail_jinode_gh: if (!sdp->sd_args.ar_spectator) gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); fail_journal_gh: if (!sdp->sd_args.ar_spectator) gfs2_glock_dq_uninit(&sdp->sd_journal_gh); fail_jindex: gfs2_jindex_free(sdp); if (jindex) gfs2_glock_dq_uninit(&ji_gh); fail: iput(sdp->sd_jindex); return error; } static struct lock_class_key gfs2_quota_imutex_key; static int 
init_inodes(struct gfs2_sbd *sdp, int undo) { int error = 0; struct inode *master = sdp->sd_master_dir->d_inode; if (undo) goto fail_qinode; error = init_journal(sdp, undo); if (error) goto fail; /* Read in the master statfs inode */ sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs"); if (IS_ERR(sdp->sd_statfs_inode)) { error = PTR_ERR(sdp->sd_statfs_inode); fs_err(sdp, "can't read in statfs inode: %d\n", error); goto fail_journal; } /* Read in the resource index inode */ sdp->sd_rindex = gfs2_lookup_simple(master, "rindex"); if (IS_ERR(sdp->sd_rindex)) { error = PTR_ERR(sdp->sd_rindex); fs_err(sdp, "can't get resource index inode: %d\n", error); goto fail_statfs; } sdp->sd_rindex_uptodate = 0; /* Read in the quota inode */ sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota"); if (IS_ERR(sdp->sd_quota_inode)) { error = PTR_ERR(sdp->sd_quota_inode); fs_err(sdp, "can't get quota file inode: %d\n", error); goto fail_rindex; } /* * i_mutex on quota files is special. Since this inode is hidden system * file, we are safe to define locking ourselves. 
*/ lockdep_set_class(&sdp->sd_quota_inode->i_mutex, &gfs2_quota_imutex_key); error = gfs2_rindex_update(sdp); if (error) goto fail_qinode; return 0; fail_qinode: iput(sdp->sd_quota_inode); fail_rindex: gfs2_clear_rgrpd(sdp); iput(sdp->sd_rindex); fail_statfs: iput(sdp->sd_statfs_inode); fail_journal: init_journal(sdp, UNDO); fail: return error; } static int init_per_node(struct gfs2_sbd *sdp, int undo) { struct inode *pn = NULL; char buf[30]; int error = 0; struct gfs2_inode *ip; struct inode *master = sdp->sd_master_dir->d_inode; if (sdp->sd_args.ar_spectator) return 0; if (undo) goto fail_qc_gh; pn = gfs2_lookup_simple(master, "per_node"); if (IS_ERR(pn)) { error = PTR_ERR(pn); fs_err(sdp, "can't find per_node directory: %d\n", error); return error; } sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid); sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf); if (IS_ERR(sdp->sd_sc_inode)) { error = PTR_ERR(sdp->sd_sc_inode); fs_err(sdp, "can't find local \"sc\" file: %d\n", error); goto fail; } sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid); sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf); if (IS_ERR(sdp->sd_qc_inode)) { error = PTR_ERR(sdp->sd_qc_inode); fs_err(sdp, "can't find local \"qc\" file: %d\n", error); goto fail_ut_i; } iput(pn); pn = NULL; ip = GFS2_I(sdp->sd_sc_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &sdp->sd_sc_gh); if (error) { fs_err(sdp, "can't lock local \"sc\" file: %d\n", error); goto fail_qc_i; } ip = GFS2_I(sdp->sd_qc_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &sdp->sd_qc_gh); if (error) { fs_err(sdp, "can't lock local \"qc\" file: %d\n", error); goto fail_ut_gh; } return 0; fail_qc_gh: gfs2_glock_dq_uninit(&sdp->sd_qc_gh); fail_ut_gh: gfs2_glock_dq_uninit(&sdp->sd_sc_gh); fail_qc_i: iput(sdp->sd_qc_inode); fail_ut_i: iput(sdp->sd_sc_inode); fail: if (pn) iput(pn); return error; } static int init_threads(struct gfs2_sbd *sdp, int undo) { struct task_struct *p; int error = 0; if (undo) goto 
fail_quotad; p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); error = IS_ERR(p); if (error) { fs_err(sdp, "can't start logd thread: %d\n", error); return error; } sdp->sd_logd_process = p; p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); error = IS_ERR(p); if (error) { fs_err(sdp, "can't start quotad thread: %d\n", error); goto fail; } sdp->sd_quotad_process = p; return 0; fail_quotad: kthread_stop(sdp->sd_quotad_process); fail: kthread_stop(sdp->sd_logd_process); return error; } static const match_table_t nolock_tokens = { { Opt_jid, "jid=%d\n", }, { Opt_err, NULL }, }; static const struct lm_lockops nolock_ops = { .lm_proto_name = "lock_nolock", .lm_put_lock = gfs2_glock_free, .lm_tokens = &nolock_tokens, }; /** * gfs2_lm_mount - mount a locking protocol * @sdp: the filesystem * @args: mount arguments * @silent: if 1, don't complain if the FS isn't a GFS2 fs * * Returns: errno */ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) { const struct lm_lockops *lm; struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct gfs2_args *args = &sdp->sd_args; const char *proto = sdp->sd_proto_name; const char *table = sdp->sd_table_name; char *o, *options; int ret; if (!strcmp("lock_nolock", proto)) { lm = &nolock_ops; sdp->sd_args.ar_localflocks = 1; #ifdef CONFIG_GFS2_FS_LOCKING_DLM } else if (!strcmp("lock_dlm", proto)) { lm = &gfs2_dlm_ops; #endif } else { printk(KERN_INFO "GFS2: can't find protocol %s\n", proto); return -ENOENT; } fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table); ls->ls_ops = lm; ls->ls_first = 1; for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) { substring_t tmp[MAX_OPT_ARGS]; int token, option; if (!o || !*o) continue; token = match_token(o, *lm->lm_tokens, tmp); switch (token) { case Opt_jid: ret = match_int(&tmp[0], &option); if (ret || option < 0) goto hostdata_error; if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags)) ls->ls_jid = option; break; case Opt_id: case Opt_nodir: /* Obsolete, but left for 
backward compat purposes */ break; case Opt_first: ret = match_int(&tmp[0], &option); if (ret || (option != 0 && option != 1)) goto hostdata_error; ls->ls_first = option; break; case Opt_err: default: hostdata_error: fs_info(sdp, "unknown hostdata (%s)\n", o); return -EINVAL; } } if (lm->lm_mount == NULL) { fs_info(sdp, "Now mounting FS...\n"); complete_all(&sdp->sd_locking_init); return 0; } ret = lm->lm_mount(sdp, table); if (ret == 0) fs_info(sdp, "Joined cluster. Now mounting FS...\n"); complete_all(&sdp->sd_locking_init); return ret; } void gfs2_lm_unmount(struct gfs2_sbd *sdp) { const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops; if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) && lm->lm_unmount) lm->lm_unmount(sdp); } static int gfs2_journalid_wait(void *word) { if (signal_pending(current)) return -EINTR; schedule(); return 0; } static int wait_on_journal(struct gfs2_sbd *sdp) { if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) return 0; return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE); } void gfs2_online_uevent(struct gfs2_sbd *sdp) { struct super_block *sb = sdp->sd_vfs; char ro[20]; char spectator[20]; char *envp[] = { ro, spectator, NULL }; sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0); sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 
1 : 0); kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp); } /** * fill_super - Read in superblock * @sb: The VFS superblock * @data: Mount options * @silent: Don't complain if it's not a GFS2 filesystem * * Returns: errno */ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent) { struct gfs2_sbd *sdp; struct gfs2_holder mount_gh; int error; sdp = init_sbd(sb); if (!sdp) { printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); return -ENOMEM; } sdp->sd_args = *args; if (sdp->sd_args.ar_spectator) { sb->s_flags |= MS_RDONLY; set_bit(SDF_RORECOVERY, &sdp->sd_flags); } if (sdp->sd_args.ar_posix_acl) sb->s_flags |= MS_POSIXACL; if (sdp->sd_args.ar_nobarrier) set_bit(SDF_NOBARRIERS, &sdp->sd_flags); sb->s_flags |= MS_NOSEC; sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; sb->s_d_op = &gfs2_dops; sb->s_export_op = &gfs2_export_ops; sb->s_xattr = gfs2_xattr_handlers; sb->s_qcop = &gfs2_quotactl_ops; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; sb->s_time_gran = 1; sb->s_maxbytes = MAX_LFS_FILESIZE; /* Set up the buffer cache and fill in some fake block size values to allow us to read-in the on-disk superblock. */ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK); sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits; sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT; sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit; sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum; if (sdp->sd_args.ar_statfs_quantum) { sdp->sd_tune.gt_statfs_slow = 0; sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum; } else { sdp->sd_tune.gt_statfs_slow = 1; sdp->sd_tune.gt_statfs_quantum = 30; } error = init_names(sdp, silent); if (error) { /* In this case, we haven't initialized sysfs, so we have to manually free the sdp. 
*/ free_percpu(sdp->sd_lkstats); kfree(sdp); sb->s_fs_info = NULL; return error; } snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name); error = gfs2_sys_fs_add(sdp); /* * If we hit an error here, gfs2_sys_fs_add will have called function * kobject_put which causes the sysfs usage count to go to zero, which * causes sysfs to call function gfs2_sbd_release, which frees sdp. * Subsequent error paths here will call gfs2_sys_fs_del, which also * kobject_put to free sdp. */ if (error) return error; gfs2_create_debugfs_file(sdp); error = gfs2_lm_mount(sdp, silent); if (error) goto fail_debug; error = init_locking(sdp, &mount_gh, DO); if (error) goto fail_lm; error = init_sb(sdp, silent); if (error) goto fail_locking; error = wait_on_journal(sdp); if (error) goto fail_sb; /* * If user space has failed to join the cluster or some similar * failure has occurred, then the journal id will contain a * negative (error) number. This will then be returned to the * caller (of the mount syscall). 
We do this even for spectator * mounts (which just write a jid of 0 to indicate "ok" even though * the jid is unused in the spectator case) */ if (sdp->sd_lockstruct.ls_jid < 0) { error = sdp->sd_lockstruct.ls_jid; sdp->sd_lockstruct.ls_jid = 0; goto fail_sb; } if (sdp->sd_args.ar_spectator) snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", sdp->sd_table_name); else snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", sdp->sd_table_name, sdp->sd_lockstruct.ls_jid); error = init_inodes(sdp, DO); if (error) goto fail_sb; error = init_per_node(sdp, DO); if (error) goto fail_inodes; error = gfs2_statfs_init(sdp); if (error) { fs_err(sdp, "can't initialize statfs subsystem: %d\n", error); goto fail_per_node; } error = init_threads(sdp, DO); if (error) goto fail_per_node; if (!(sb->s_flags & MS_RDONLY)) { error = gfs2_make_fs_rw(sdp); if (error) { fs_err(sdp, "can't make FS RW: %d\n", error); goto fail_threads; } } gfs2_glock_dq_uninit(&mount_gh); gfs2_online_uevent(sdp); return 0; fail_threads: init_threads(sdp, UNDO); fail_per_node: init_per_node(sdp, UNDO); fail_inodes: init_inodes(sdp, UNDO); fail_sb: if (sdp->sd_root_dir) dput(sdp->sd_root_dir); if (sdp->sd_master_dir) dput(sdp->sd_master_dir); if (sb->s_root) dput(sb->s_root); sb->s_root = NULL; fail_locking: init_locking(sdp, &mount_gh, UNDO); fail_lm: gfs2_gl_hash_clear(sdp); gfs2_lm_unmount(sdp); fail_debug: gfs2_delete_debugfs_file(sdp); free_percpu(sdp->sd_lkstats); /* gfs2_sys_fs_del must be the last thing we do, since it causes * sysfs to call function gfs2_sbd_release, which frees sdp. 
*/ gfs2_sys_fs_del(sdp); sb->s_fs_info = NULL; return error; } static int set_gfs2_super(struct super_block *s, void *data) { s->s_bdev = data; s->s_dev = s->s_bdev->bd_dev; /* * We set the bdi here to the queue backing, file systems can * overwrite this in ->fill_super() */ s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; return 0; } static int test_gfs2_super(struct super_block *s, void *ptr) { struct block_device *bdev = ptr; return (bdev == s->s_bdev); } /** * gfs2_mount - Get the GFS2 superblock * @fs_type: The GFS2 filesystem type * @flags: Mount flags * @dev_name: The name of the device * @data: The mount arguments * * Q. Why not use get_sb_bdev() ? * A. We need to select one of two root directories to mount, independent * of whether this is the initial, or subsequent, mount of this sb * * Returns: 0 or -ve on error */ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct block_device *bdev; struct super_block *s; fmode_t mode = FMODE_READ | FMODE_EXCL; int error; struct gfs2_args args; struct gfs2_sbd *sdp; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(bdev)) return ERR_CAST(bdev); /* * once the super is inserted into the list by sget, s_umount * will protect the lockfs code from trying to start a snapshot * while we are mounting */ mutex_lock(&bdev->bd_fsfreeze_mutex); if (bdev->bd_fsfreeze_count > 0) { mutex_unlock(&bdev->bd_fsfreeze_mutex); error = -EBUSY; goto error_bdev; } s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev); mutex_unlock(&bdev->bd_fsfreeze_mutex); error = PTR_ERR(s); if (IS_ERR(s)) goto error_bdev; if (s->s_root) blkdev_put(bdev, mode); memset(&args, 0, sizeof(args)); args.ar_quota = GFS2_QUOTA_DEFAULT; args.ar_data = GFS2_DATA_DEFAULT; args.ar_commit = 30; args.ar_statfs_quantum = 30; args.ar_quota_quantum = 60; args.ar_errors = GFS2_ERRORS_DEFAULT; error = gfs2_mount_args(&args, data); 
if (error) { printk(KERN_WARNING "GFS2: can't parse mount arguments\n"); goto error_super; } if (s->s_root) { error = -EBUSY; if ((flags ^ s->s_flags) & MS_RDONLY) goto error_super; } else { char b[BDEVNAME_SIZE]; s->s_mode = mode; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(bdev)); error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0); if (error) goto error_super; s->s_flags |= MS_ACTIVE; bdev->bd_super = s; } sdp = s->s_fs_info; if (args.ar_meta) return dget(sdp->sd_master_dir); else return dget(sdp->sd_root_dir); error_super: deactivate_locked_super(s); return ERR_PTR(error); error_bdev: blkdev_put(bdev, mode); return ERR_PTR(error); } static int set_meta_super(struct super_block *s, void *ptr) { return -EINVAL; } static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct super_block *s; struct gfs2_sbd *sdp; struct path path; int error; error = kern_path(dev_name, LOOKUP_FOLLOW, &path); if (error) { printk(KERN_WARNING "GFS2: path_lookup on %s returned error %d\n", dev_name, error); return ERR_PTR(error); } s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags, path.dentry->d_inode->i_sb->s_bdev); path_put(&path); if (IS_ERR(s)) { printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); return ERR_CAST(s); } if ((flags ^ s->s_flags) & MS_RDONLY) { deactivate_locked_super(s); return ERR_PTR(-EBUSY); } sdp = s->s_fs_info; return dget(sdp->sd_master_dir); } static void gfs2_kill_sb(struct super_block *sb) { struct gfs2_sbd *sdp = sb->s_fs_info; if (sdp == NULL) { kill_block_super(sb); return; } gfs2_meta_syncfs(sdp); dput(sdp->sd_root_dir); dput(sdp->sd_master_dir); sdp->sd_root_dir = NULL; sdp->sd_master_dir = NULL; shrink_dcache_sb(sb); gfs2_delete_debugfs_file(sdp); free_percpu(sdp->sd_lkstats); kill_block_super(sb); } struct file_system_type gfs2_fs_type = { .name = "gfs2", .fs_flags = FS_REQUIRES_DEV, .mount = gfs2_mount, .kill_sb = 
gfs2_kill_sb, .owner = THIS_MODULE, }; MODULE_ALIAS_FS("gfs2"); struct file_system_type gfs2meta_fs_type = { .name = "gfs2meta", .fs_flags = FS_REQUIRES_DEV, .mount = gfs2_mount_meta, .owner = THIS_MODULE, }; MODULE_ALIAS_FS("gfs2meta");
gpl-2.0
cleaton/acer_kernel
arch/ia64/mm/discontig.c
662
20222
/* * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Tony Luck <tony.luck@intel.com> * Copyright (c) 2002 NEC Corp. * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com> * Copyright (c) 2004 Silicon Graphics, Inc * Russ Anderson <rja@sgi.com> * Jesse Barnes <jbarnes@sgi.com> * Jack Steiner <steiner@sgi.com> */ /* * Platform initialization for Discontig Memory */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/nmi.h> #include <linux/swap.h> #include <linux/bootmem.h> #include <linux/acpi.h> #include <linux/efi.h> #include <linux/nodemask.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/meminit.h> #include <asm/numa.h> #include <asm/sections.h> /* * Track per-node information needed to setup the boot memory allocator, the * per-node areas, and the real VM. */ struct early_node_data { struct ia64_node_data *node_data; unsigned long pernode_addr; unsigned long pernode_size; unsigned long num_physpages; #ifdef CONFIG_ZONE_DMA unsigned long num_dma_physpages; #endif unsigned long min_pfn; unsigned long max_pfn; }; static struct early_node_data mem_data[MAX_NUMNODES] __initdata; static nodemask_t memory_less_mask __initdata; pg_data_t *pgdat_list[MAX_NUMNODES]; /* * To prevent cache aliasing effects, align per-node structures so that they * start at addresses that are strided by node number. */ #define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024) #define NODEDATA_ALIGN(addr, node) \ ((((addr) + 1024*1024-1) & ~(1024*1024-1)) + \ (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1))) /** * build_node_maps - callback to setup bootmem structs for each node * @start: physical start of range * @len: length of range * @node: node where this range resides * * We allocate a struct bootmem_data for each piece of memory that we wish to * treat as a virtually contiguous block (i.e. each node). 
Each such block * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down * if necessary. Any non-existent pages will simply be part of the virtual * memmap. We also update min_low_pfn and max_low_pfn here as we receive * memory ranges from the caller. */ static int __init build_node_maps(unsigned long start, unsigned long len, int node) { unsigned long spfn, epfn, end = start + len; struct bootmem_data *bdp = &bootmem_node_data[node]; epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT; spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT; if (!bdp->node_low_pfn) { bdp->node_min_pfn = spfn; bdp->node_low_pfn = epfn; } else { bdp->node_min_pfn = min(spfn, bdp->node_min_pfn); bdp->node_low_pfn = max(epfn, bdp->node_low_pfn); } return 0; } /** * early_nr_cpus_node - return number of cpus on a given node * @node: node to check * * Count the number of cpus on @node. We can't use nr_cpus_node() yet because * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been * called yet. Note that node 0 will also count all non-existent cpus. */ static int __meminit early_nr_cpus_node(int node) { int cpu, n = 0; for_each_possible_early_cpu(cpu) if (node == node_cpuid[cpu].nid) n++; return n; } /** * compute_pernodesize - compute size of pernode data * @node: the node id. */ static unsigned long __meminit compute_pernodesize(int node) { unsigned long pernodesize = 0, cpus; cpus = early_nr_cpus_node(node); pernodesize += PERCPU_PAGE_SIZE * cpus; pernodesize += node * L1_CACHE_BYTES; pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); pernodesize = PAGE_ALIGN(pernodesize); return pernodesize; } /** * per_cpu_node_setup - setup per-cpu areas on each node * @cpu_data: per-cpu area on this node * @node: node to setup * * Copy the static per-cpu data into the region we just set aside and then * setup __per_cpu_offset for each CPU on this node. 
Return a pointer to * the end of the area. */ static void *per_cpu_node_setup(void *cpu_data, int node) { #ifdef CONFIG_SMP int cpu; for_each_possible_early_cpu(cpu) { if (cpu == 0) { void *cpu0_data = __cpu0_per_cpu; __per_cpu_offset[cpu] = (char*)cpu0_data - __per_cpu_start; } else if (node == node_cpuid[cpu].nid) { memcpy(__va(cpu_data), __phys_per_cpu_start, __per_cpu_end - __per_cpu_start); __per_cpu_offset[cpu] = (char*)__va(cpu_data) - __per_cpu_start; cpu_data += PERCPU_PAGE_SIZE; } } #endif return cpu_data; } /** * fill_pernode - initialize pernode data. * @node: the node id. * @pernode: physical address of pernode data * @pernodesize: size of the pernode data */ static void __init fill_pernode(int node, unsigned long pernode, unsigned long pernodesize) { void *cpu_data; int cpus = early_nr_cpus_node(node); struct bootmem_data *bdp = &bootmem_node_data[node]; mem_data[node].pernode_addr = pernode; mem_data[node].pernode_size = pernodesize; memset(__va(pernode), 0, pernodesize); cpu_data = (void *)pernode; pernode += PERCPU_PAGE_SIZE * cpus; pernode += node * L1_CACHE_BYTES; pgdat_list[node] = __va(pernode); pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); mem_data[node].node_data = __va(pernode); pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); pgdat_list[node]->bdata = bdp; pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); cpu_data = per_cpu_node_setup(cpu_data, node); return; } /** * find_pernode_space - allocate memory for memory map and per-node structures * @start: physical start of range * @len: length of range * @node: node where this range resides * * This routine reserves space for the per-cpu data struct, the list of * pg_data_ts and the per-node data struct. Each node will have something like * the following in the first chunk of addr. space large enough to hold it. 
* * ________________________ * | | * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first * | PERCPU_PAGE_SIZE * | start and length big enough * | cpus_on_this_node | Node 0 will also have entries for all non-existent cpus. * |------------------------| * | local pg_data_t * | * |------------------------| * | local ia64_node_data | * |------------------------| * | ??? | * |________________________| * * Once this space has been set aside, the bootmem maps are initialized. We * could probably move the allocation of the per-cpu and ia64_node_data space * outside of this function and use alloc_bootmem_node(), but doing it here * is straightforward and we get the alignments we want so... */ static int __init find_pernode_space(unsigned long start, unsigned long len, int node) { unsigned long spfn, epfn; unsigned long pernodesize = 0, pernode, pages, mapsize; struct bootmem_data *bdp = &bootmem_node_data[node]; spfn = start >> PAGE_SHIFT; epfn = (start + len) >> PAGE_SHIFT; pages = bdp->node_low_pfn - bdp->node_min_pfn; mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT; /* * Make sure this memory falls within this node's usable memory * since we may have thrown some away in build_maps(). */ if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn) return 0; /* Don't setup this node's local space twice... */ if (mem_data[node].pernode_addr) return 0; /* * Calculate total size needed, incl. what's necessary * for good alignment and alias prevention. */ pernodesize = compute_pernodesize(node); pernode = NODEDATA_ALIGN(start, node); /* Is this range big enough for what we want to store here? 
*/ if (start + len > (pernode + pernodesize + mapsize)) fill_pernode(node, pernode, pernodesize); return 0; } /** * free_node_bootmem - free bootmem allocator memory for use * @start: physical start of range * @len: length of range * @node: node where this range resides * * Simply calls the bootmem allocator to free the specified ranged from * the given pg_data_t's bdata struct. After this function has been called * for all the entries in the EFI memory map, the bootmem allocator will * be ready to service allocation requests. */ static int __init free_node_bootmem(unsigned long start, unsigned long len, int node) { free_bootmem_node(pgdat_list[node], start, len); return 0; } /** * reserve_pernode_space - reserve memory for per-node space * * Reserve the space used by the bootmem maps & per-node space in the boot * allocator so that when we actually create the real mem maps we don't * use their memory. */ static void __init reserve_pernode_space(void) { unsigned long base, size, pages; struct bootmem_data *bdp; int node; for_each_online_node(node) { pg_data_t *pdp = pgdat_list[node]; if (node_isset(node, memory_less_mask)) continue; bdp = pdp->bdata; /* First the bootmem_map itself */ pages = bdp->node_low_pfn - bdp->node_min_pfn; size = bootmem_bootmap_pages(pages) << PAGE_SHIFT; base = __pa(bdp->node_bootmem_map); reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT); /* Now the per-node space */ size = mem_data[node].pernode_size; base = __pa(mem_data[node].pernode_addr); reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT); } } static void __meminit scatter_node_data(void) { pg_data_t **dst; int node; /* * for_each_online_node() can't be used at here. * node_online_map is not set for hot-added nodes at this time, * because we are halfway through initialization of the new node's * structures. If for_each_online_node() is used, a new node's * pg_data_ptrs will be not initialized. Instead of using it, * pgdat_list[] is checked. 
*/ for_each_node(node) { if (pgdat_list[node]) { dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; memcpy(dst, pgdat_list, sizeof(pgdat_list)); } } } /** * initialize_pernode_data - fixup per-cpu & per-node pointers * * Each node's per-node area has a copy of the global pg_data_t list, so * we copy that to each node here, as well as setting the per-cpu pointer * to the local node data structure. The active_cpus field of the per-node * structure gets setup by the platform_cpu_init() function later. */ static void __init initialize_pernode_data(void) { int cpu, node; scatter_node_data(); #ifdef CONFIG_SMP /* Set the node_data pointer for each per-cpu struct */ for_each_possible_early_cpu(cpu) { node = node_cpuid[cpu].nid; per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; } #else { struct cpuinfo_ia64 *cpu0_cpu_info; cpu = 0; node = node_cpuid[cpu].nid; cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + ((char *)&per_cpu__cpu_info - __per_cpu_start)); cpu0_cpu_info->node_data = mem_data[node].node_data; } #endif /* CONFIG_SMP */ } /** * memory_less_node_alloc - * attempt to allocate memory on the best NUMA slit * node but fall back to any other node when __alloc_bootmem_node fails * for best. * @nid: node id * @pernodesize: size of this node's pernode data */ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) { void *ptr = NULL; u8 best = 0xff; int bestnode = -1, node, anynode = 0; for_each_online_node(node) { if (node_isset(node, memory_less_mask)) continue; else if (node_distance(nid, node) < best) { best = node_distance(nid, node); bestnode = node; } anynode = node; } if (bestnode == -1) bestnode = anynode; ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); return ptr; } /** * memory_less_nodes - allocate and initialize CPU only nodes pernode * information. 
*/ static void __init memory_less_nodes(void) { unsigned long pernodesize; void *pernode; int node; for_each_node_mask(node, memory_less_mask) { pernodesize = compute_pernodesize(node); pernode = memory_less_node_alloc(node, pernodesize); fill_pernode(node, __pa(pernode), pernodesize); } return; } /** * find_memory - walk the EFI memory map and setup the bootmem allocator * * Called early in boot to setup the bootmem allocator, and to * allocate the per-cpu and per-node structures. */ void __init find_memory(void) { int node; reserve_memory(); if (num_online_nodes() == 0) { printk(KERN_ERR "node info missing!\n"); node_set_online(0); } nodes_or(memory_less_mask, memory_less_mask, node_online_map); min_low_pfn = -1; max_low_pfn = 0; /* These actually end up getting called by call_pernode_memory() */ efi_memmap_walk(filter_rsvd_memory, build_node_maps); efi_memmap_walk(filter_rsvd_memory, find_pernode_space); efi_memmap_walk(find_max_min_low_pfn, NULL); for_each_online_node(node) if (bootmem_node_data[node].node_low_pfn) { node_clear(node, memory_less_mask); mem_data[node].min_pfn = ~0UL; } efi_memmap_walk(filter_memory, register_active_ranges); /* * Initialize the boot memory maps in reverse order since that's * what the bootmem allocator expects */ for (node = MAX_NUMNODES - 1; node >= 0; node--) { unsigned long pernode, pernodesize, map; struct bootmem_data *bdp; if (!node_online(node)) continue; else if (node_isset(node, memory_less_mask)) continue; bdp = &bootmem_node_data[node]; pernode = mem_data[node].pernode_addr; pernodesize = mem_data[node].pernode_size; map = pernode + pernodesize; init_bootmem_node(pgdat_list[node], map>>PAGE_SHIFT, bdp->node_min_pfn, bdp->node_low_pfn); } efi_memmap_walk(filter_rsvd_memory, free_node_bootmem); reserve_pernode_space(); memory_less_nodes(); initialize_pernode_data(); max_pfn = max_low_pfn; find_initrd(); } #ifdef CONFIG_SMP /** * per_cpu_init - setup per-cpu variables * * find_pernode_space() does most of this already, we 
just need to set * local_per_cpu_offset */ void __cpuinit *per_cpu_init(void) { int cpu; static int first_time = 1; if (first_time) { first_time = 0; for_each_possible_early_cpu(cpu) per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; } return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; } #endif /* CONFIG_SMP */ /** * show_mem - give short summary of memory stats * * Shows a simple page count of reserved and used pages in the system. * For discontig machines, it does this on a per-pgdat basis. */ void show_mem(void) { int i, total_reserved = 0; int total_shared = 0, total_cached = 0; unsigned long total_present = 0; pg_data_t *pgdat; printk(KERN_INFO "Mem-info:\n"); show_free_areas(); printk(KERN_INFO "Node memory in pages:\n"); for_each_online_pgdat(pgdat) { unsigned long present; unsigned long flags; int shared = 0, cached = 0, reserved = 0; pgdat_resize_lock(pgdat, &flags); present = pgdat->node_present_pages; for(i = 0; i < pgdat->node_spanned_pages; i++) { struct page *page; if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) touch_nmi_watchdog(); if (pfn_valid(pgdat->node_start_pfn + i)) page = pfn_to_page(pgdat->node_start_pfn + i); else { i = vmemmap_find_next_valid_pfn(pgdat->node_id, i) - 1; continue; } if (PageReserved(page)) reserved++; else if (PageSwapCache(page)) cached++; else if (page_count(page)) shared += page_count(page)-1; } pgdat_resize_unlock(pgdat, &flags); total_present += present; total_reserved += reserved; total_cached += cached; total_shared += shared; printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " "shrd: %10d, swpd: %10d\n", pgdat->node_id, present, reserved, shared, cached); } printk(KERN_INFO "%ld pages of RAM\n", total_present); printk(KERN_INFO "%d reserved pages\n", total_reserved); printk(KERN_INFO "%d pages shared\n", total_shared); printk(KERN_INFO "%d pages swap cached\n", total_cached); printk(KERN_INFO "Total of %ld pages in page table cache\n", quicklist_total_size()); printk(KERN_INFO "%d free buffer 
pages\n", nr_free_buffer_pages()); } /** * call_pernode_memory - use SRAT to call callback functions with node info * @start: physical start of range * @len: length of range * @arg: function to call for each range * * efi_memmap_walk() knows nothing about layout of memory across nodes. Find * out to which node a block of memory belongs. Ignore memory that we cannot * identify, and split blocks that run across multiple nodes. * * Take this opportunity to round the start address up and the end address * down to page boundaries. */ void call_pernode_memory(unsigned long start, unsigned long len, void *arg) { unsigned long rs, re, end = start + len; void (*func)(unsigned long, unsigned long, int); int i; start = PAGE_ALIGN(start); end &= PAGE_MASK; if (start >= end) return; func = arg; if (!num_node_memblks) { /* No SRAT table, so assume one node (node 0) */ if (start < end) (*func)(start, end - start, 0); return; } for (i = 0; i < num_node_memblks; i++) { rs = max(start, node_memblk[i].start_paddr); re = min(end, node_memblk[i].start_paddr + node_memblk[i].size); if (rs < re) (*func)(rs, re - rs, node_memblk[i].nid); if (re == end) break; } } /** * count_node_pages - callback to build per-node memory info structures * @start: physical start of range * @len: length of range * @node: node where this range resides * * Each node has it's own number of physical pages, DMAable pages, start, and * end page frame number. This routine will be called by call_pernode_memory() * for each piece of usable memory and will setup these values for each node. * Very similar to build_maps(). 
*/ static __init int count_node_pages(unsigned long start, unsigned long len, int node) { unsigned long end = start + len; mem_data[node].num_physpages += len >> PAGE_SHIFT; #ifdef CONFIG_ZONE_DMA if (start <= __pa(MAX_DMA_ADDRESS)) mem_data[node].num_dma_physpages += (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT; #endif start = GRANULEROUNDDOWN(start); end = GRANULEROUNDUP(end); mem_data[node].max_pfn = max(mem_data[node].max_pfn, end >> PAGE_SHIFT); mem_data[node].min_pfn = min(mem_data[node].min_pfn, start >> PAGE_SHIFT); return 0; } /** * paging_init - setup page tables * * paging_init() sets up the page tables for each node of the system and frees * the bootmem allocator memory for general use. */ void __init paging_init(void) { unsigned long max_dma; unsigned long pfn_offset = 0; unsigned long max_pfn = 0; int node; unsigned long max_zone_pfns[MAX_NR_ZONES]; max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; efi_memmap_walk(filter_rsvd_memory, count_node_pages); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); #ifdef CONFIG_VIRTUAL_MEM_MAP vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page)); vmem_map = (struct page *) vmalloc_end; efi_memmap_walk(create_mem_map_page_table, NULL); printk("Virtual mem_map starts at 0x%p\n", vmem_map); #endif for_each_online_node(node) { num_physpages += mem_data[node].num_physpages; pfn_offset = mem_data[node].min_pfn; #ifdef CONFIG_VIRTUAL_MEM_MAP NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; #endif if (mem_data[node].max_pfn > max_pfn) max_pfn = mem_data[node].max_pfn; } memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = max_dma; #endif max_zone_pfns[ZONE_NORMAL] = max_pfn; free_area_init_nodes(max_zone_pfns); zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } #ifdef CONFIG_MEMORY_HOTPLUG pg_data_t *arch_alloc_nodedata(int nid) { unsigned long size = compute_pernodesize(nid); 
return kzalloc(size, GFP_KERNEL); } void arch_free_nodedata(pg_data_t *pgdat) { kfree(pgdat); } void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) { pgdat_list[update_node] = update_pgdat; scatter_node_data(); } #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP int __meminit vmemmap_populate(struct page *start_page, unsigned long size, int node) { return vmemmap_populate_basepages(start_page, size, node); } #endif
gpl-2.0
w1ndy/linux
drivers/media/usb/pvrusb2/pvrusb2-std.c
1174
9377
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "pvrusb2-std.h" #include "pvrusb2-debug.h" #include <asm/string.h> #include <linux/slab.h> struct std_name { const char *name; v4l2_std_id id; }; #define CSTD_PAL \ (V4L2_STD_PAL_B| \ V4L2_STD_PAL_B1| \ V4L2_STD_PAL_G| \ V4L2_STD_PAL_H| \ V4L2_STD_PAL_I| \ V4L2_STD_PAL_D| \ V4L2_STD_PAL_D1| \ V4L2_STD_PAL_K| \ V4L2_STD_PAL_M| \ V4L2_STD_PAL_N| \ V4L2_STD_PAL_Nc| \ V4L2_STD_PAL_60) #define CSTD_NTSC \ (V4L2_STD_NTSC_M| \ V4L2_STD_NTSC_M_JP| \ V4L2_STD_NTSC_M_KR| \ V4L2_STD_NTSC_443) #define CSTD_ATSC \ (V4L2_STD_ATSC_8_VSB| \ V4L2_STD_ATSC_16_VSB) #define CSTD_SECAM \ (V4L2_STD_SECAM_B| \ V4L2_STD_SECAM_D| \ V4L2_STD_SECAM_G| \ V4L2_STD_SECAM_H| \ V4L2_STD_SECAM_K| \ V4L2_STD_SECAM_K1| \ V4L2_STD_SECAM_L| \ V4L2_STD_SECAM_LC) #define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B) #define TSTD_B1 (V4L2_STD_PAL_B1) #define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D) #define TSTD_D1 (V4L2_STD_PAL_D1) #define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G) #define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H) #define TSTD_I (V4L2_STD_PAL_I) #define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K) #define TSTD_K1 (V4L2_STD_SECAM_K1) #define TSTD_L (V4L2_STD_SECAM_L) #define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M) #define TSTD_N (V4L2_STD_PAL_N) #define TSTD_Nc (V4L2_STD_PAL_Nc) #define 
TSTD_60 (V4L2_STD_PAL_60) #define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_ATSC|CSTD_SECAM) /* Mapping of standard bits to color system */ static const struct std_name std_groups[] = { {"PAL",CSTD_PAL}, {"NTSC",CSTD_NTSC}, {"SECAM",CSTD_SECAM}, {"ATSC",CSTD_ATSC}, }; /* Mapping of standard bits to modulation system */ static const struct std_name std_items[] = { {"B",TSTD_B}, {"B1",TSTD_B1}, {"D",TSTD_D}, {"D1",TSTD_D1}, {"G",TSTD_G}, {"H",TSTD_H}, {"I",TSTD_I}, {"K",TSTD_K}, {"K1",TSTD_K1}, {"L",TSTD_L}, {"LC",V4L2_STD_SECAM_LC}, {"M",TSTD_M}, {"Mj",V4L2_STD_NTSC_M_JP}, {"443",V4L2_STD_NTSC_443}, {"Mk",V4L2_STD_NTSC_M_KR}, {"N",TSTD_N}, {"Nc",TSTD_Nc}, {"60",TSTD_60}, {"8VSB",V4L2_STD_ATSC_8_VSB}, {"16VSB",V4L2_STD_ATSC_16_VSB}, }; // Search an array of std_name structures and return a pointer to the // element with the matching name. static const struct std_name *find_std_name(const struct std_name *arrPtr, unsigned int arrSize, const char *bufPtr, unsigned int bufSize) { unsigned int idx; const struct std_name *p; for (idx = 0; idx < arrSize; idx++) { p = arrPtr + idx; if (strlen(p->name) != bufSize) continue; if (!memcmp(bufPtr,p->name,bufSize)) return p; } return NULL; } int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr, unsigned int bufSize) { v4l2_std_id id = 0; v4l2_std_id cmsk = 0; v4l2_std_id t; int mMode = 0; unsigned int cnt; char ch; const struct std_name *sp; while (bufSize) { if (!mMode) { cnt = 0; while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++; if (cnt >= bufSize) return 0; // No more characters sp = find_std_name(std_groups, ARRAY_SIZE(std_groups), bufPtr,cnt); if (!sp) return 0; // Illegal color system name cnt++; bufPtr += cnt; bufSize -= cnt; mMode = !0; cmsk = sp->id; continue; } cnt = 0; while (cnt < bufSize) { ch = bufPtr[cnt]; if (ch == ';') { mMode = 0; break; } if (ch == '/') break; cnt++; } sp = find_std_name(std_items, ARRAY_SIZE(std_items), bufPtr,cnt); if (!sp) return 0; // Illegal modulation system ID t = sp->id & cmsk; if 
(!t) return 0; // Specific color + modulation system illegal id |= t; if (cnt < bufSize) cnt++; bufPtr += cnt; bufSize -= cnt; } if (idPtr) *idPtr = id; return !0; } unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize, v4l2_std_id id) { unsigned int idx1,idx2; const struct std_name *ip,*gp; int gfl,cfl; unsigned int c1,c2; cfl = 0; c1 = 0; for (idx1 = 0; idx1 < ARRAY_SIZE(std_groups); idx1++) { gp = std_groups + idx1; gfl = 0; for (idx2 = 0; idx2 < ARRAY_SIZE(std_items); idx2++) { ip = std_items + idx2; if (!(gp->id & ip->id & id)) continue; if (!gfl) { if (cfl) { c2 = scnprintf(bufPtr,bufSize,";"); c1 += c2; bufSize -= c2; bufPtr += c2; } cfl = !0; c2 = scnprintf(bufPtr,bufSize, "%s-",gp->name); gfl = !0; } else { c2 = scnprintf(bufPtr,bufSize,"/"); } c1 += c2; bufSize -= c2; bufPtr += c2; c2 = scnprintf(bufPtr,bufSize, ip->name); c1 += c2; bufSize -= c2; bufPtr += c2; } } return c1; } // Template data for possible enumerated video standards. Here we group // standards which share common frame rates and resolution. 
static struct v4l2_standard generic_standards[] = { { .id = (TSTD_B|TSTD_B1| TSTD_D|TSTD_D1| TSTD_G| TSTD_H| TSTD_I| TSTD_K|TSTD_K1| TSTD_L| V4L2_STD_SECAM_LC | TSTD_N|TSTD_Nc), .frameperiod = { .numerator = 1, .denominator= 25 }, .framelines = 625, .reserved = {0,0,0,0} }, { .id = (TSTD_M| V4L2_STD_NTSC_M_JP| V4L2_STD_NTSC_M_KR), .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} }, { // This is a total wild guess .id = (TSTD_60), .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} }, { // This is total wild guess .id = V4L2_STD_NTSC_443, .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} } }; static struct v4l2_standard *match_std(v4l2_std_id id) { unsigned int idx; for (idx = 0; idx < ARRAY_SIZE(generic_standards); idx++) { if (generic_standards[idx].id & id) { return generic_standards + idx; } } return NULL; } static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id) { struct v4l2_standard *template; int idx; unsigned int bcnt; template = match_std(id); if (!template) return 0; idx = std->index; memcpy(std,template,sizeof(*template)); std->index = idx; std->id = id; bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id); std->name[bcnt] = 0; pvr2_trace(PVR2_TRACE_STD,"Set up standard idx=%u name=%s", std->index,std->name); return !0; } /* These are special cases of combined standards that we should enumerate separately if the component pieces are present. 
*/ static v4l2_std_id std_mixes[] = { V4L2_STD_PAL_B | V4L2_STD_PAL_G, V4L2_STD_PAL_D | V4L2_STD_PAL_K, V4L2_STD_SECAM_B | V4L2_STD_SECAM_G, V4L2_STD_SECAM_D | V4L2_STD_SECAM_K, }; struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr, v4l2_std_id id) { unsigned int std_cnt = 0; unsigned int idx,bcnt,idx2; v4l2_std_id idmsk,cmsk,fmsk; struct v4l2_standard *stddefs; if (pvrusb2_debug & PVR2_TRACE_STD) { char buf[100]; bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id); pvr2_trace( PVR2_TRACE_STD,"Mapping standards mask=0x%x (%.*s)", (int)id,bcnt,buf); } *countptr = 0; std_cnt = 0; fmsk = 0; for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) { if (!(idmsk & cmsk)) continue; cmsk &= ~idmsk; if (match_std(idmsk)) { std_cnt++; continue; } fmsk |= idmsk; } for (idx2 = 0; idx2 < ARRAY_SIZE(std_mixes); idx2++) { if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++; } /* Don't complain about ATSC standard values */ fmsk &= ~CSTD_ATSC; if (fmsk) { char buf[100]; bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk); pvr2_trace( PVR2_TRACE_ERROR_LEGS, "WARNING:" " Failed to classify the following standard(s): %.*s", bcnt,buf); } pvr2_trace(PVR2_TRACE_STD,"Setting up %u unique standard(s)", std_cnt); if (!std_cnt) return NULL; // paranoia stddefs = kzalloc(sizeof(struct v4l2_standard) * std_cnt, GFP_KERNEL); if (!stddefs) return NULL; for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx; idx = 0; /* Enumerate potential special cases */ for (idx2 = 0; (idx2 < ARRAY_SIZE(std_mixes)) && (idx < std_cnt); idx2++) { if (!(id & std_mixes[idx2])) continue; if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++; } /* Now enumerate individual pieces */ for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) { if (!(idmsk & cmsk)) continue; cmsk &= ~idmsk; if (!pvr2_std_fill(stddefs+idx,idmsk)) continue; idx++; } *countptr = std_cnt; return stddefs; } v4l2_std_id pvr2_std_get_usable(void) { return CSTD_ALL; }
gpl-2.0
lamassu/kernel
drivers/regulator/tps6524x-regulator.c
1430
15878
/* * Regulator driver for TPS6524x PMIC * * Copyright (C) 2010 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #define REG_LDO_SET 0x0 #define LDO_ILIM_MASK 1 /* 0 = 400-800, 1 = 900-1500 */ #define LDO_VSEL_MASK 0x0f #define LDO2_ILIM_SHIFT 12 #define LDO2_VSEL_SHIFT 4 #define LDO1_ILIM_SHIFT 8 #define LDO1_VSEL_SHIFT 0 #define REG_BLOCK_EN 0x1 #define BLOCK_MASK 1 #define BLOCK_LDO1_SHIFT 0 #define BLOCK_LDO2_SHIFT 1 #define BLOCK_LCD_SHIFT 2 #define BLOCK_USB_SHIFT 3 #define REG_DCDC_SET 0x2 #define DCDC_VDCDC_MASK 0x1f #define DCDC_VDCDC1_SHIFT 0 #define DCDC_VDCDC2_SHIFT 5 #define DCDC_VDCDC3_SHIFT 10 #define REG_DCDC_EN 0x3 #define DCDCDCDC_EN_MASK 0x1 #define DCDCDCDC1_EN_SHIFT 0 #define DCDCDCDC1_PG_MSK BIT(1) #define DCDCDCDC2_EN_SHIFT 2 #define DCDCDCDC2_PG_MSK BIT(3) #define DCDCDCDC3_EN_SHIFT 4 #define DCDCDCDC3_PG_MSK BIT(5) #define REG_USB 0x4 #define USB_ILIM_SHIFT 0 #define USB_ILIM_MASK 0x3 #define USB_TSD_SHIFT 2 #define USB_TSD_MASK 0x3 #define USB_TWARN_SHIFT 4 #define USB_TWARN_MASK 0x3 #define USB_IWARN_SD BIT(6) #define USB_FAST_LOOP BIT(7) #define REG_ALARM 0x5 #define ALARM_LDO1 BIT(0) #define ALARM_DCDC1 BIT(1) #define ALARM_DCDC2 BIT(2) #define ALARM_DCDC3 BIT(3) #define ALARM_LDO2 BIT(4) #define ALARM_USB_WARN BIT(5) #define ALARM_USB_ALARM BIT(6) #define ALARM_LCD BIT(9) #define ALARM_TEMP_WARM BIT(10) #define 
ALARM_TEMP_HOT BIT(11) #define ALARM_NRST BIT(14) #define ALARM_POWERUP BIT(15) #define REG_INT_ENABLE 0x6 #define INT_LDO1 BIT(0) #define INT_DCDC1 BIT(1) #define INT_DCDC2 BIT(2) #define INT_DCDC3 BIT(3) #define INT_LDO2 BIT(4) #define INT_USB_WARN BIT(5) #define INT_USB_ALARM BIT(6) #define INT_LCD BIT(9) #define INT_TEMP_WARM BIT(10) #define INT_TEMP_HOT BIT(11) #define INT_GLOBAL_EN BIT(15) #define REG_INT_STATUS 0x7 #define STATUS_LDO1 BIT(0) #define STATUS_DCDC1 BIT(1) #define STATUS_DCDC2 BIT(2) #define STATUS_DCDC3 BIT(3) #define STATUS_LDO2 BIT(4) #define STATUS_USB_WARN BIT(5) #define STATUS_USB_ALARM BIT(6) #define STATUS_LCD BIT(9) #define STATUS_TEMP_WARM BIT(10) #define STATUS_TEMP_HOT BIT(11) #define REG_SOFTWARE_RESET 0xb #define REG_WRITE_ENABLE 0xd #define REG_REV_ID 0xf #define N_DCDC 3 #define N_LDO 2 #define N_SWITCH 2 #define N_REGULATORS (3 /* DCDC */ + \ 2 /* LDO */ + \ 2 /* switch */) #define FIXED_ILIMSEL BIT(0) #define FIXED_VOLTAGE BIT(1) #define CMD_READ(reg) ((reg) << 6) #define CMD_WRITE(reg) (BIT(5) | (reg) << 6) #define STAT_CLK BIT(3) #define STAT_WRITE BIT(2) #define STAT_INVALID BIT(1) #define STAT_WP BIT(0) struct field { int reg; int shift; int mask; }; struct supply_info { const char *name; int n_voltages; const int *voltages; int fixed_voltage; int n_ilimsels; const int *ilimsels; int fixed_ilimsel; int flags; struct field enable, voltage, ilimsel; }; struct tps6524x { struct device *dev; struct spi_device *spi; struct mutex lock; struct regulator_desc desc[N_REGULATORS]; struct regulator_dev *rdev[N_REGULATORS]; }; static int __read_reg(struct tps6524x *hw, int reg) { int error = 0; u16 cmd = CMD_READ(reg), in; u8 status; struct spi_message m; struct spi_transfer t[3]; spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = &cmd; t[0].len = 2; t[0].bits_per_word = 12; spi_message_add_tail(&t[0], &m); t[1].rx_buf = &in; t[1].len = 2; t[1].bits_per_word = 16; spi_message_add_tail(&t[1], &m); t[2].rx_buf = &status; 
t[2].len = 1; t[2].bits_per_word = 4; spi_message_add_tail(&t[2], &m); error = spi_sync(hw->spi, &m); if (error < 0) return error; dev_dbg(hw->dev, "read reg %d, data %x, status %x\n", reg, in, status); if (!(status & STAT_CLK) || (status & STAT_WRITE)) return -EIO; if (status & STAT_INVALID) return -EINVAL; return in; } static int read_reg(struct tps6524x *hw, int reg) { int ret; mutex_lock(&hw->lock); ret = __read_reg(hw, reg); mutex_unlock(&hw->lock); return ret; } static int __write_reg(struct tps6524x *hw, int reg, int val) { int error = 0; u16 cmd = CMD_WRITE(reg), out = val; u8 status; struct spi_message m; struct spi_transfer t[3]; spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = &cmd; t[0].len = 2; t[0].bits_per_word = 12; spi_message_add_tail(&t[0], &m); t[1].tx_buf = &out; t[1].len = 2; t[1].bits_per_word = 16; spi_message_add_tail(&t[1], &m); t[2].rx_buf = &status; t[2].len = 1; t[2].bits_per_word = 4; spi_message_add_tail(&t[2], &m); error = spi_sync(hw->spi, &m); if (error < 0) return error; dev_dbg(hw->dev, "wrote reg %d, data %x, status %x\n", reg, out, status); if (!(status & STAT_CLK) || !(status & STAT_WRITE)) return -EIO; if (status & (STAT_INVALID | STAT_WP)) return -EINVAL; return error; } static int __rmw_reg(struct tps6524x *hw, int reg, int mask, int val) { int ret; ret = __read_reg(hw, reg); if (ret < 0) return ret; ret &= ~mask; ret |= val; ret = __write_reg(hw, reg, ret); return (ret < 0) ? 
ret : 0; } static int rmw_protect(struct tps6524x *hw, int reg, int mask, int val) { int ret; mutex_lock(&hw->lock); ret = __write_reg(hw, REG_WRITE_ENABLE, 1); if (ret) { dev_err(hw->dev, "failed to set write enable\n"); goto error; } ret = __rmw_reg(hw, reg, mask, val); if (ret) dev_err(hw->dev, "failed to rmw register %d\n", reg); ret = __write_reg(hw, REG_WRITE_ENABLE, 0); if (ret) { dev_err(hw->dev, "failed to clear write enable\n"); goto error; } error: mutex_unlock(&hw->lock); return ret; } static int read_field(struct tps6524x *hw, const struct field *field) { int tmp; tmp = read_reg(hw, field->reg); if (tmp < 0) return tmp; return (tmp >> field->shift) & field->mask; } static int write_field(struct tps6524x *hw, const struct field *field, int val) { if (val & ~field->mask) return -EOVERFLOW; return rmw_protect(hw, field->reg, field->mask << field->shift, val << field->shift); } static const int dcdc1_voltages[] = { 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 1525000, 1550000, 1575000, }; static const int dcdc2_voltages[] = { 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000, 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, 2900000, 2950000, }; static const int dcdc3_voltages[] = { 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3050000, 3100000, 3150000, 3200000, 3250000, 3300000, 3350000, 3400000, 3450000, 3500000, 3550000, 3600000, }; static const int ldo1_voltages[] = { 4300000, 4350000, 4400000, 4450000, 4500000, 4550000, 4600000, 4650000, 4700000, 4750000, 4800000, 4850000, 4900000, 4950000, 5000000, 5050000, }; 
static const int ldo2_voltages[] = { 1100000, 1150000, 1200000, 1250000, 1300000, 1700000, 1750000, 1800000, 1850000, 1900000, 3150000, 3200000, 3250000, 3300000, 3350000, 3400000, }; static const int ldo_ilimsel[] = { 400000, 1500000 }; static const int usb_ilimsel[] = { 200000, 400000, 800000, 1000000 }; #define __MK_FIELD(_reg, _mask, _shift) \ { .reg = (_reg), .mask = (_mask), .shift = (_shift), } static const struct supply_info supply_info[N_REGULATORS] = { { .name = "DCDC1", .flags = FIXED_ILIMSEL, .n_voltages = ARRAY_SIZE(dcdc1_voltages), .voltages = dcdc1_voltages, .fixed_ilimsel = 2400000, .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, DCDCDCDC1_EN_SHIFT), .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, DCDC_VDCDC1_SHIFT), }, { .name = "DCDC2", .flags = FIXED_ILIMSEL, .n_voltages = ARRAY_SIZE(dcdc2_voltages), .voltages = dcdc2_voltages, .fixed_ilimsel = 1200000, .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, DCDCDCDC2_EN_SHIFT), .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, DCDC_VDCDC2_SHIFT), }, { .name = "DCDC3", .flags = FIXED_ILIMSEL, .n_voltages = ARRAY_SIZE(dcdc3_voltages), .voltages = dcdc3_voltages, .fixed_ilimsel = 1200000, .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, DCDCDCDC3_EN_SHIFT), .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, DCDC_VDCDC3_SHIFT), }, { .name = "LDO1", .n_voltages = ARRAY_SIZE(ldo1_voltages), .voltages = ldo1_voltages, .n_ilimsels = ARRAY_SIZE(ldo_ilimsel), .ilimsels = ldo_ilimsel, .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, BLOCK_LDO1_SHIFT), .voltage = __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK, LDO1_VSEL_SHIFT), .ilimsel = __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK, LDO1_ILIM_SHIFT), }, { .name = "LDO2", .n_voltages = ARRAY_SIZE(ldo2_voltages), .voltages = ldo2_voltages, .n_ilimsels = ARRAY_SIZE(ldo_ilimsel), .ilimsels = ldo_ilimsel, .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, BLOCK_LDO2_SHIFT), .voltage = __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK, LDO2_VSEL_SHIFT), .ilimsel = 
__MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK, LDO2_ILIM_SHIFT), }, { .name = "USB", .flags = FIXED_VOLTAGE, .fixed_voltage = 5000000, .n_ilimsels = ARRAY_SIZE(usb_ilimsel), .ilimsels = usb_ilimsel, .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, BLOCK_USB_SHIFT), .ilimsel = __MK_FIELD(REG_USB, USB_ILIM_MASK, USB_ILIM_SHIFT), }, { .name = "LCD", .flags = FIXED_VOLTAGE | FIXED_ILIMSEL, .fixed_voltage = 5000000, .fixed_ilimsel = 400000, .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, BLOCK_LCD_SHIFT), }, }; static int list_voltage(struct regulator_dev *rdev, unsigned selector) { const struct supply_info *info; struct tps6524x *hw; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; if (info->flags & FIXED_VOLTAGE) return selector ? -EINVAL : info->fixed_voltage; return ((selector < info->n_voltages) ? info->voltages[selector] : -EINVAL); } static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { const struct supply_info *info; struct tps6524x *hw; unsigned i; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; if (info->flags & FIXED_VOLTAGE) return -EINVAL; for (i = 0; i < info->n_voltages; i++) if (min_uV <= info->voltages[i] && max_uV >= info->voltages[i]) break; if (i >= info->n_voltages) i = info->n_voltages - 1; *selector = info->voltages[i]; return write_field(hw, &info->voltage, i); } static int get_voltage(struct regulator_dev *rdev) { const struct supply_info *info; struct tps6524x *hw; int ret; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; if (info->flags & FIXED_VOLTAGE) return info->fixed_voltage; ret = read_field(hw, &info->voltage); if (ret < 0) return ret; if (WARN_ON(ret >= info->n_voltages)) return -EIO; return info->voltages[ret]; } static int set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { const struct supply_info *info; struct tps6524x *hw; int i; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; if (info->flags & 
FIXED_ILIMSEL) return -EINVAL; for (i = 0; i < info->n_ilimsels; i++) if (min_uA <= info->ilimsels[i] && max_uA >= info->ilimsels[i]) break; if (i >= info->n_ilimsels) return -EINVAL; return write_field(hw, &info->ilimsel, i); } static int get_current_limit(struct regulator_dev *rdev) { const struct supply_info *info; struct tps6524x *hw; int ret; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; if (info->flags & FIXED_ILIMSEL) return info->fixed_ilimsel; ret = read_field(hw, &info->ilimsel); if (ret < 0) return ret; if (WARN_ON(ret >= info->n_ilimsels)) return -EIO; return info->ilimsels[ret]; } static int enable_supply(struct regulator_dev *rdev) { const struct supply_info *info; struct tps6524x *hw; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; return write_field(hw, &info->enable, 1); } static int disable_supply(struct regulator_dev *rdev) { const struct supply_info *info; struct tps6524x *hw; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; return write_field(hw, &info->enable, 0); } static int is_supply_enabled(struct regulator_dev *rdev) { const struct supply_info *info; struct tps6524x *hw; hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; return read_field(hw, &info->enable); } static struct regulator_ops regulator_ops = { .is_enabled = is_supply_enabled, .enable = enable_supply, .disable = disable_supply, .get_voltage = get_voltage, .set_voltage = set_voltage, .list_voltage = list_voltage, .set_current_limit = set_current_limit, .get_current_limit = get_current_limit, }; static int pmic_remove(struct spi_device *spi) { struct tps6524x *hw = spi_get_drvdata(spi); int i; if (!hw) return 0; for (i = 0; i < N_REGULATORS; i++) { if (hw->rdev[i]) regulator_unregister(hw->rdev[i]); hw->rdev[i] = NULL; } spi_set_drvdata(spi, NULL); kfree(hw); return 0; } static int __devinit pmic_probe(struct spi_device *spi) { struct tps6524x *hw; struct device *dev = &spi->dev; const struct 
supply_info *info = supply_info; struct regulator_init_data *init_data; int ret = 0, i; init_data = dev->platform_data; if (!init_data) { dev_err(dev, "could not find regulator platform data\n"); return -EINVAL; } hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL); if (!hw) { dev_err(dev, "cannot allocate regulator private data\n"); return -ENOMEM; } spi_set_drvdata(spi, hw); memset(hw, 0, sizeof(struct tps6524x)); hw->dev = dev; hw->spi = spi_dev_get(spi); mutex_init(&hw->lock); for (i = 0; i < N_REGULATORS; i++, info++, init_data++) { hw->desc[i].name = info->name; hw->desc[i].id = i; hw->desc[i].n_voltages = info->n_voltages; hw->desc[i].ops = &regulator_ops; hw->desc[i].type = REGULATOR_VOLTAGE; hw->desc[i].owner = THIS_MODULE; if (info->flags & FIXED_VOLTAGE) hw->desc[i].n_voltages = 1; hw->rdev[i] = regulator_register(&hw->desc[i], dev, init_data, hw); if (IS_ERR(hw->rdev[i])) { ret = PTR_ERR(hw->rdev[i]); hw->rdev[i] = NULL; goto fail; } } return 0; fail: pmic_remove(spi); return ret; } static struct spi_driver pmic_driver = { .probe = pmic_probe, .remove = __devexit_p(pmic_remove), .driver = { .name = "tps6524x", .owner = THIS_MODULE, }, }; static int __init pmic_driver_init(void) { return spi_register_driver(&pmic_driver); } module_init(pmic_driver_init); static void __exit pmic_driver_exit(void) { spi_unregister_driver(&pmic_driver); } module_exit(pmic_driver_exit); MODULE_DESCRIPTION("TPS6524X PMIC Driver"); MODULE_AUTHOR("Cyril Chemparathy"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:tps6524x");
gpl-2.0
yukchou/linux-sunxi-1
arch/x86/kernel/acpi/boot.c
1942
40882
/* * boot.c - Architecture-Specific Low-Level ACPI Boot Support * * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/init.h> #include <linux/acpi.h> #include <linux/acpi_pmtmr.h> #include <linux/efi.h> #include <linux/cpumask.h> #include <linux/module.h> #include <linux/dmi.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/pci_x86.h> #include <asm/pgtable.h> #include <asm/io_apic.h> #include <asm/apic.h> #include <asm/io.h> #include <asm/mpspec.h> #include <asm/smp.h> static int __initdata acpi_force = 0; u32 acpi_rsdt_forced; int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_64 # include <asm/proto.h> # include <asm/numa_64.h> #endif /* X86 */ #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) #define PREFIX "ACPI: " int acpi_noirq; /* skip ACPI IRQ initialization */ int acpi_pci_disabled; /* skip ACPI PCI 
scan and IRQ initialization */ EXPORT_SYMBOL(acpi_pci_disabled); int acpi_lapic; int acpi_ioapic; int acpi_strict; u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; #endif #ifndef __HAVE_ARCH_CMPXCHG #warning ACPI uses CMPXCHG, i486 and later hardware #endif /* -------------------------------------------------------------------------- Boot-time Configuration -------------------------------------------------------------------------- */ /* * The default interrupt routing model is PIC (8259). This gets * overridden if IOAPICs are enumerated (below). */ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; /* * ISA irqs by default are the first 16 gsis but can be * any gsi as specified by an interrupt source override. */ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; static unsigned int gsi_to_irq(unsigned int gsi) { unsigned int irq = gsi + NR_IRQS_LEGACY; unsigned int i; for (i = 0; i < NR_IRQS_LEGACY; i++) { if (isa_irq_to_gsi[i] == gsi) { return i; } } /* Provide an identity mapping of gsi == irq * except on truly weird platforms that have * non isa irqs in the first 16 gsis. */ if (gsi >= NR_IRQS_LEGACY) irq = gsi; else irq = gsi_top + gsi; return irq; } static u32 irq_to_gsi(int irq) { unsigned int gsi; if (irq < NR_IRQS_LEGACY) gsi = isa_irq_to_gsi[irq]; else if (irq < gsi_top) gsi = irq; else if (irq < (gsi_top + NR_IRQS_LEGACY)) gsi = irq - gsi_top; else gsi = 0xffffffff; return gsi; } /* * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, * to map the target physical address. The problem is that set_fixmap() * provides a single page, and it is possible that the page is not * sufficient. 
* By using this area, we can map up to MAX_IO_APICS pages temporarily, * i.e. until the next __va_range() call. * * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and * count idx down while incrementing the phys address. */ char *__init __acpi_map_table(unsigned long phys, unsigned long size) { if (!phys || !size) return NULL; return early_ioremap(phys, size); } void __init __acpi_unmap_table(char *map, unsigned long size) { if (!map || !size) return; early_iounmap(map, size); } #ifdef CONFIG_X86_LOCAL_APIC static int __init acpi_parse_madt(struct acpi_table_header *table) { struct acpi_table_madt *madt = NULL; if (!cpu_has_apic) return -EINVAL; madt = (struct acpi_table_madt *)table; if (!madt) { printk(KERN_WARNING PREFIX "Unable to map MADT\n"); return -ENODEV; } if (madt->address) { acpi_lapic_addr = (u64) madt->address; printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", madt->address); } default_acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); return 0; } static void __cpuinit acpi_register_lapic(int id, u8 enabled) { unsigned int ver = 0; if (id >= (MAX_LOCAL_APIC-1)) { printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); return; } if (!enabled) { ++disabled_cpus; return; } if (boot_cpu_physical_apicid != -1U) ver = apic_version[boot_cpu_physical_apicid]; generic_processor_info(id, ver); } static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; int apic_id; u8 enabled; processor = (struct acpi_madt_local_x2apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; #ifdef CONFIG_X86_X2APIC /* * We need to register disabled CPU as well to permit * counting disabled CPUs. 
This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ if (!apic->apic_id_valid(apic_id) && enabled) printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); else acpi_register_lapic(apic_id, enabled); #else printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); #endif return 0; } static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic *processor = NULL; processor = (struct acpi_madt_local_apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ acpi_register_lapic(processor->id, /* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_sapic *processor = NULL; processor = (struct acpi_madt_local_sapic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) return -EINVAL; acpi_lapic_addr = lapic_addr_ovr->address; return 0; } static int __init acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL; x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header; if 
(BAD_MADT_ENTRY(x2apic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (x2apic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); return 0; } static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; if (BAD_MADT_ENTRY(lapic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (lapic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); return 0; } #endif /*CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_io_apic *ioapic = NULL; ioapic = (struct acpi_madt_io_apic *)header; if (BAD_MADT_ENTRY(ioapic, end)) return -EINVAL; acpi_table_print_madt_entry(header); mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base); return 0; } /* * Parse Interrupt Source Override for the ACPI SCI */ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi) { if (trigger == 0) /* compatible SCI trigger is level */ trigger = 3; if (polarity == 0) /* compatible SCI polarity is low */ polarity = 3; /* Command-line over-ride via acpi_sci= */ if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; /* * mp_config_acpi_legacy_irqs() already setup IRQs < 16 * If GSI is < 16, this will update its flags, * else it will create a new mp_irqs[] entry. 
*/ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); /* * stash over-ride to indicate we've been here * and for later update of acpi_gbl_FADT */ acpi_sci_override_gsi = gsi; return; } static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_interrupt_override *intsrc = NULL; intsrc = (struct acpi_madt_interrupt_override *)header; if (BAD_MADT_ENTRY(intsrc, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { acpi_sci_ioapic_setup(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } if (intsrc->source_irq == 0) { if (acpi_skip_timer_override) { printk(PREFIX "BIOS IRQ0 override ignored.\n"); return 0; } if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); } } mp_override_legacy_irq(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } static int __init acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src = NULL; nmi_src = (struct acpi_madt_nmi_source *)header; if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; acpi_table_print_madt_entry(header); /* TBD: Support nimsrc entries? */ return 0; } #endif /* CONFIG_X86_IO_APIC */ /* * acpi_pic_sci_set_trigger() * * use ELCR to set PIC-mode trigger type for SCI * * If a PIC-mode SCI is not recognized or gives spurious IRQ7's * it may require Edge Trigger -- use "acpi_sci=edge" * * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. 
* ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0) * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0) */ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) { unsigned int mask = 1 << irq; unsigned int old, new; /* Real old ELCR mask */ old = inb(0x4d0) | (inb(0x4d1) << 8); /* * If we use ACPI to set PCI IRQs, then we should clear ELCR * since we will set it correctly as we enable the PCI irq * routing. */ new = acpi_noirq ? old : 0; /* * Update SCI information in the ELCR, it isn't in the PCI * routing tables.. */ switch (trigger) { case 1: /* Edge - clear */ new &= ~mask; break; case 3: /* Level - set */ new |= mask; break; } if (old == new) return; printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); outb(new, 0x4d0); outb(new >> 8, 0x4d1); } int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { *irq = gsi_to_irq(gsi); #ifdef CONFIG_X86_IO_APIC if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) setup_IO_APIC_irq_extra(gsi); #endif return 0; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) { if (isa_irq >= 16) return -1; *gsi = irq_to_gsi(isa_irq); return 0; } static int acpi_register_gsi_pic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_PCI /* * Make sure all (legacy) PCI IRQs are set as level-triggered. 
*/ if (trigger == ACPI_LEVEL_SENSITIVE) eisa_set_level_irq(gsi); #endif return gsi; } static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_IO_APIC gsi = mp_register_gsi(dev, gsi, trigger, polarity); #endif return gsi; } int (*__acpi_register_gsi)(struct device *dev, u32 gsi, int trigger, int polarity) = acpi_register_gsi_pic; /* * success: return IRQ number (>=0) * failure: return < 0 */ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { unsigned int irq; unsigned int plat_gsi = gsi; plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity); irq = gsi_to_irq(plat_gsi); return irq; } void __init acpi_set_irq_model_pic(void) { acpi_irq_model = ACPI_IRQ_MODEL_PIC; __acpi_register_gsi = acpi_register_gsi_pic; acpi_ioapic = 0; } void __init acpi_set_irq_model_ioapic(void) { acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; __acpi_register_gsi = acpi_register_gsi_ioapic; acpi_ioapic = 1; } /* * ACPI based hotplug support for CPU */ #ifdef CONFIG_ACPI_HOTPLUG_CPU #include <acpi/processor.h> static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; nid = acpi_get_node(handle); if (nid == -1 || !node_online(nid)) return; set_apicid_to_node(physid, nid); numa_set_node(cpu, nid); #endif } static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_local_apic *lapic; cpumask_var_t tmp_map, new_map; u8 physid; int cpu; int retval = -ENOMEM; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return -EINVAL; if (!buffer.length || !buffer.pointer) return -EINVAL; obj = buffer.pointer; if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < sizeof(*lapic)) { kfree(buffer.pointer); return -EINVAL; } lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || 
!(lapic->lapic_flags & ACPI_MADT_ENABLED)) { kfree(buffer.pointer); return -EINVAL; } physid = lapic->id; kfree(buffer.pointer); buffer.length = ACPI_ALLOCATE_BUFFER; buffer.pointer = NULL; lapic = NULL; if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) goto free_tmp_map; cpumask_copy(tmp_map, cpu_present_mask); acpi_register_lapic(physid, ACPI_MADT_ENABLED); /* * If mp_register_lapic successfully generates a new logical cpu * number, then the following will get us exactly what was mapped */ cpumask_andnot(new_map, cpu_present_mask, tmp_map); if (cpumask_empty(new_map)) { printk ("Unable to map lapic to logical cpu number\n"); retval = -EINVAL; goto free_new_map; } acpi_processor_set_pdc(handle); cpu = cpumask_first(new_map); acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; retval = 0; free_new_map: free_cpumask_var(new_map); free_tmp_map: free_cpumask_var(tmp_map); out: return retval; } /* wrapper to silence section mismatch warning */ int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) { return _acpi_map_lsapic(handle, pcpu); } EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { per_cpu(x86_cpu_to_apicid, cpu) = -1; set_cpu_present(cpu, false); num_processors--; return (0); } EXPORT_SYMBOL(acpi_unmap_lsapic); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { /* TBD */ return -EINVAL; } EXPORT_SYMBOL(acpi_register_ioapic); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) { /* TBD */ return -EINVAL; } EXPORT_SYMBOL(acpi_unregister_ioapic); static int __init acpi_parse_sbf(struct acpi_table_header *table) { struct acpi_table_boot *sb; sb = (struct acpi_table_boot *)table; if (!sb) { printk(KERN_WARNING PREFIX "Unable to map SBF\n"); return -ENODEV; } sbf_port = sb->cmos_index; /* Save CMOS port */ return 0; } #ifdef CONFIG_HPET_TIMER #include <asm/hpet.h> static struct __initdata resource *hpet_res; static int __init 
acpi_parse_hpet(struct acpi_table_header *table) { struct acpi_table_hpet *hpet_tbl; hpet_tbl = (struct acpi_table_hpet *)table; if (!hpet_tbl) { printk(KERN_WARNING PREFIX "Unable to map HPET\n"); return -ENODEV; } if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { printk(KERN_WARNING PREFIX "HPET timers must be located in " "memory.\n"); return -1; } hpet_address = hpet_tbl->address.address; hpet_blockid = hpet_tbl->sequence; /* * Some broken BIOSes advertise HPET at 0x0. We really do not * want to allocate a resource there. */ if (!hpet_address) { printk(KERN_WARNING PREFIX "HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address); return 0; } #ifdef CONFIG_X86_64 /* * Some even more broken BIOSes advertise HPET at * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add * some noise: */ if (hpet_address == 0xfed0000000000000UL) { if (!hpet_force_user) { printk(KERN_WARNING PREFIX "HPET id: %#x " "base: 0xfed0000000000000 is bogus\n " "try hpet=force on the kernel command line to " "fix it up to 0xfed00000.\n", hpet_tbl->id); hpet_address = 0; return 0; } printk(KERN_WARNING PREFIX "HPET id: %#x base: 0xfed0000000000000 fixed up " "to 0xfed00000.\n", hpet_tbl->id); hpet_address >>= 32; } #endif printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address); /* * Allocate and initialize the HPET firmware resource for adding into * the resource tree during the lateinit timeframe. */ #define HPET_RESOURCE_NAME_SIZE 9 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", hpet_tbl->sequence); hpet_res->start = hpet_address; hpet_res->end = hpet_address + (1 * 1024) - 1; return 0; } /* * hpet_insert_resource inserts the HPET resources used into the resource * tree. 
*/ static __init int hpet_insert_resource(void) { if (!hpet_res) return 1; return insert_resource(&iomem_resource, hpet_res); } late_initcall(hpet_insert_resource); #else #define acpi_parse_hpet NULL #endif static int __init acpi_parse_fadt(struct acpi_table_header *table) { #ifdef CONFIG_X86_PM_TIMER /* detect the location of the ACPI PM Timer */ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { /* FADT rev. 2 */ if (acpi_gbl_FADT.xpm_timer_block.space_id != ACPI_ADR_SPACE_SYSTEM_IO) return 0; pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; /* * "X" fields are optional extensions to the original V1.0 * fields, so we must selectively expand V1.0 fields if the * corresponding X field is zero. */ if (!pmtmr_ioport) pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } else { /* FADT rev. 1 */ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } if (pmtmr_ioport) printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport); #endif return 0; } #ifdef CONFIG_X86_LOCAL_APIC /* * Parse LAPIC entries in MADT * returns 0 on success, < 0 on error */ static int __init early_acpi_parse_madt_lapic_addr_ovr(void) { int count; if (!cpu_has_apic) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). */ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); return count; } static int __init acpi_parse_madt_lapic_entries(void) { int count; int x2count = 0; if (!cpu_has_apic) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 
*/ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_sapic, MAX_LOCAL_APIC); if (!count) { x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, acpi_parse_x2apic, MAX_LOCAL_APIC); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_LOCAL_APIC); } if (!count && !x2count) { printk(KERN_ERR PREFIX "No LAPIC entries present\n"); /* TBD: Cleanup to allow fallback to MPS */ return -ENODEV; } else if (count < 0 || x2count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, acpi_parse_x2apic_nmi, 0); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0); if (count < 0 || x2count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 #ifdef CONFIG_X86_ES7000 extern int es7000_plat; #endif void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { int ioapic; int pin; struct mpc_intsrc mp_irq; /* * Convert 'gsi' to 'ioapic.pin'. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return; pin = mp_find_ioapic_pin(ioapic, gsi); /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE * increase of timer interrupts! 
*/ if ((bus_irq == 0) && (trigger == 3)) trigger = 1; mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger << 2) | polarity; mp_irq.srcbus = MP_ISA_BUS; mp_irq.srcbusirq = bus_irq; /* IRQ */ mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ mp_irq.dstirq = pin; /* INTIN# */ mp_save_irq(&mp_irq); isa_irq_to_gsi[bus_irq] = gsi; } void __init mp_config_acpi_legacy_irqs(void) { int i; struct mpc_intsrc mp_irq; #if defined (CONFIG_MCA) || defined (CONFIG_EISA) /* * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; #endif set_bit(MP_ISA_BUS, mp_bus_not_pci); pr_debug("Bus #%d is ISA\n", MP_ISA_BUS); #ifdef CONFIG_X86_ES7000 /* * Older generations of ES7000 have no legacy identity mappings */ if (es7000_plat == 1) return; #endif /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. */ for (i = 0; i < 16; i++) { int ioapic, pin; unsigned int dstapic; int idx; u32 gsi; /* Locate the gsi that irq i maps to. */ if (acpi_isa_irq_to_gsi(i, &gsi)) continue; /* * Locate the IOAPIC that manages the ISA IRQ. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) continue; pin = mp_find_ioapic_pin(ioapic, gsi); dstapic = mpc_ioapic_id(ioapic); for (idx = 0; idx < mp_irq_entries; idx++) { struct mpc_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? 
*/ if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ if (irq->dstapic == dstapic && irq->dstirq == pin) break; } if (idx != mp_irq_entries) { printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); continue; /* IRQ already used */ } mp_irq.type = MP_INTSRC; mp_irq.irqflag = 0; /* Conforming */ mp_irq.srcbus = MP_ISA_BUS; mp_irq.dstapic = dstapic; mp_irq.irqtype = mp_INT; mp_irq.srcbusirq = i; /* Identity mapped */ mp_irq.dstirq = pin; mp_save_irq(&mp_irq); } } static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_MPPARSE struct mpc_intsrc mp_irq; struct pci_dev *pdev; unsigned char number; unsigned int devfn; int ioapic; u8 pin; if (!acpi_ioapic) return 0; if (!dev) return 0; if (dev->bus != &pci_bus_type) return 0; pdev = to_pci_dev(dev); number = pdev->bus->number; devfn = pdev->devfn; pin = pdev->pin; /* print the entry should happen on mptable identically */ mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | (polarity == ACPI_ACTIVE_HIGH ? 
1 : 3); mp_irq.srcbus = number; mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); ioapic = mp_find_ioapic(gsi); mp_irq.dstapic = mpc_ioapic_id(ioapic); mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); mp_save_irq(&mp_irq); #endif return 0; } int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { int ioapic; int ioapic_pin; struct io_apic_irq_attr irq_attr; if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) return gsi; /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) return gsi; ioapic = mp_find_ioapic(gsi); if (ioapic < 0) { printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); return gsi; } ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); if (ioapic_pin > MP_MAX_IOAPIC_PIN) { printk(KERN_ERR "Invalid reference to IOAPIC pin " "%d-%d\n", mpc_ioapic_id(ioapic), ioapic_pin); return gsi; } if (enable_update_mptable) mp_config_acpi_gsi(dev, gsi, trigger, polarity); set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, polarity == ACPI_ACTIVE_HIGH ? 0 : 1); io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr); return gsi; } /* * Parse IOAPIC related entries in MADT * returns 0 on success, < 0 on error */ static int __init acpi_parse_madt_ioapic_entries(void) { int count; /* * ACPI interpreter is required to complete interrupt setup, * so if it is off, don't enumerate the io-apics with ACPI. 
* If MPS is present, it will handle them, * otherwise the system will stay in PIC mode */ if (acpi_disabled || acpi_noirq) return -ENODEV; if (!cpu_has_apic) return -ENODEV; /* * if "noapic" boot option, don't look for IO-APICs */ if (skip_ioapic_setup) { printk(KERN_INFO PREFIX "Skipping IOAPIC probe " "due to 'noapic' option.\n"); return -ENODEV; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, MAX_IO_APICS); if (!count) { printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); return -ENODEV; } else if (count < 0) { printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); return count; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, nr_irqs); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } /* * If BIOS did not supply an INT_SRC_OVR for the SCI * pretend we got one so we can set the SCI flags. */ if (!acpi_sci_override_gsi) acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, acpi_gbl_FADT.sci_interrupt); /* Fill in identity legacy mappings where no override */ mp_config_acpi_legacy_irqs(); count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, nr_irqs); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #else static inline int acpi_parse_madt_ioapic_entries(void) { return -1; } #endif /* !CONFIG_X86_IO_APIC */ static void __init early_acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = early_acpi_parse_madt_lapic_addr_ovr(); if (!error) { acpi_lapic = 1; smp_found_config = 1; } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. 
*/ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } #endif } static void __init acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = acpi_parse_madt_lapic_entries(); if (!error) { acpi_lapic = 1; /* * Parse MADT IO-APIC entries */ error = acpi_parse_madt_ioapic_entries(); if (!error) { acpi_set_irq_model_ioapic(); smp_found_config = 1; } } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. */ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } else { /* * ACPI found no MADT, and so ACPI wants UP PIC mode. * In the event an MPS table was found, forget it. * Boot with "acpi=off" to use MPS on such a system. */ if (smp_found_config) { printk(KERN_WARNING PREFIX "No APIC-table, disabling MPS\n"); smp_found_config = 0; } } /* * ACPI supports both logical (e.g. Hyper-Threading) and physical * processors, where MPS only supports physical. 
*/ if (acpi_lapic && acpi_ioapic) printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " "information\n"); else if (acpi_lapic) printk(KERN_INFO "Using ACPI for processor (LAPIC) " "configuration information\n"); #endif return; } static int __init disable_acpi_irq(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", d->ident); acpi_noirq_set(); } return 0; } static int __init disable_acpi_pci(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", d->ident); acpi_disable_pci(); } return 0; } static int __init dmi_disable_acpi(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: acpi off\n", d->ident); disable_acpi(); } else { printk(KERN_NOTICE "Warning: DMI blacklist says broken, but acpi forced\n"); } return 0; } /* * Force ignoring BIOS IRQ0 override */ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) { if (!acpi_skip_timer_override) { pr_notice("%s detected: Ignoring BIOS IRQ0 override\n", d->ident); acpi_skip_timer_override = 1; } return 0; } /* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org */ static struct dmi_system_id __initdata acpi_dmi_table[] = { /* * Boxes that need ACPI disabled */ { .callback = dmi_disable_acpi, .ident = "IBM Thinkpad", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), }, }, /* * Boxes that need ACPI PCI IRQ routing disabled */ { .callback = disable_acpi_irq, .ident = "ASUS A7V", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), DMI_MATCH(DMI_BOARD_NAME, "<A7V>"), /* newer BIOS, Revision 1011, does work */ DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"), }, }, { /* * Latest BIOS for IBM 600E (1.16) has bad pcinum * for LPC bridge, which is needed for the PCI * interrupt links to work. 
DSDT fix is in bug 5966. * 2645, 2646 model numbers are shared with 600/600E/600X */ .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2645", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2645"), }, }, { .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2646", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2646"), }, }, /* * Boxes that need ACPI PCI IRQ routing and PCI scan disabled */ { /* _BBN 0 bug */ .callback = disable_acpi_pci, .ident = "ASUS PR-DLS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"), DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") }, }, { .callback = disable_acpi_pci, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, {} }; /* second table for DMI checks that should run after early-quirks */ static struct dmi_system_id __initdata acpi_dmi_table_late[] = { /* * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature * trip points to 16C if the INTIN2 input of the I/O APIC * is enabled. This input is incorrectly designated the * ISA IRQ 0 via an interrupt source override even though * it is wired to the output of the master 8259A and INTIN0 * is not connected at all. Force ignoring BIOS IRQ0 * override in that cases. 
*/ { .callback = dmi_ignore_irq0_timer_override, .ident = "HP nx6115 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6125 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6325 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP 6715b laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "FUJITSU SIEMENS", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), }, }, {} }; /* * acpi_boot_table_init() and acpi_boot_init() * called from setup_arch(), always. * 1. checksums all tables * 2. enumerates lapics * 3. enumerates io-apics * * acpi_table_init() is separate to allow reading SRAT without * other side effects. * * side effects of acpi_boot_init: * acpi_lapic = 1 if LAPIC found * acpi_ioapic = 1 if IOAPIC found * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; * if acpi_blacklisted() acpi_disabled = 1; * acpi_irq_model=... * ... */ void __init acpi_boot_table_init(void) { dmi_check_system(acpi_dmi_table); /* * If acpi_disabled, bail out */ if (acpi_disabled) return; /* * Initialize the ACPI boot-time table parser. 
*/ if (acpi_table_init()) { disable_acpi(); return; } acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); /* * blacklist may disable ACPI entirely */ if (acpi_blacklisted()) { if (acpi_force) { printk(KERN_WARNING PREFIX "acpi=force override\n"); } else { printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); disable_acpi(); return; } } } int __init early_acpi_boot_init(void) { /* * If acpi_disabled, bail out */ if (acpi_disabled) return 1; /* * Process the Multiple APIC Description Table (MADT), if present */ early_acpi_process_madt(); return 0; } int __init acpi_boot_init(void) { /* those are executed after early-quirks are executed */ dmi_check_system(acpi_dmi_table_late); /* * If acpi_disabled, bail out */ if (acpi_disabled) return 1; acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); /* * set sci_int and PM timer address */ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); /* * Process the Multiple APIC Description Table (MADT), if present */ acpi_process_madt(); acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); if (!acpi_noirq) x86_init.pci.init = pci_acpi_init; return 0; } static int __init parse_acpi(char *arg) { if (!arg) return -EINVAL; /* "acpi=off" disables both ACPI table parsing and interpreter */ if (strcmp(arg, "off") == 0) { disable_acpi(); } /* acpi=force to over-ride black-list */ else if (strcmp(arg, "force") == 0) { acpi_force = 1; acpi_disabled = 0; } /* acpi=strict disables out-of-spec workarounds */ else if (strcmp(arg, "strict") == 0) { acpi_strict = 1; } /* acpi=rsdt use RSDT instead of XSDT */ else if (strcmp(arg, "rsdt") == 0) { acpi_rsdt_forced = 1; } /* "acpi=noirq" disables ACPI interrupt routing */ else if (strcmp(arg, "noirq") == 0) { acpi_noirq_set(); } /* "acpi=copy_dsdt" copys DSDT */ else if (strcmp(arg, "copy_dsdt") == 0) { acpi_gbl_copy_dsdt_locally = 1; } else { /* Core will printk when we return error. */ return -EINVAL; } return 0; } early_param("acpi", parse_acpi); /* FIXME: Using pci= for an ACPI parameter is a travesty. 
*/ static int __init parse_pci(char *arg) { if (arg && strcmp(arg, "noacpi") == 0) acpi_disable_pci(); return 0; } early_param("pci", parse_pci); int __init acpi_mps_check(void) { #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE) /* mptable code is not built-in*/ if (acpi_disabled || acpi_noirq) { printk(KERN_WARNING "MPS support code is not built-in.\n" "Using acpi=off or acpi=noirq or pci=noacpi " "may have problem\n"); return 1; } #endif return 0; } #ifdef CONFIG_X86_IO_APIC static int __init parse_acpi_skip_timer_override(char *arg) { acpi_skip_timer_override = 1; return 0; } early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); static int __init parse_acpi_use_timer_override(char *arg) { acpi_use_timer_override = 1; return 0; } early_param("acpi_use_timer_override", parse_acpi_use_timer_override); #endif /* CONFIG_X86_IO_APIC */ static int __init setup_acpi_sci(char *s) { if (!s) return -EINVAL; if (!strcmp(s, "edge")) acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE | (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); else if (!strcmp(s, "level")) acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL | (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); else if (!strcmp(s, "high")) acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH | (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); else if (!strcmp(s, "low")) acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW | (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); else return -EINVAL; return 0; } early_param("acpi_sci", setup_acpi_sci); int __acpi_acquire_global_lock(unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return (new < 3) ? -1 : 0; } int __acpi_release_global_lock(unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = old & ~0x3; val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return old & 0x1; }
gpl-2.0
e9wifi-dev/android_kernel_lge_e9wifi
drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c
2198
26257
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "vcd_ddl.h" #include "vcd_ddl_shared_mem.h" #include "vidc.h" static u32 ddl_handle_hw_fatal_errors(struct ddl_client_context *ddl); static u32 ddl_handle_client_fatal_errors( struct ddl_client_context *ddl); static void ddl_input_failed_cb(struct ddl_client_context *ddl, u32 vcd_event, u32 vcd_status); static u32 ddl_handle_core_recoverable_errors( struct ddl_client_context *ddl); static u32 ddl_handle_core_warnings(u32 error_code); static void ddl_release_prev_field( struct ddl_client_context *ddl); static u32 ddl_handle_dec_seq_hdr_fail_error(struct ddl_client_context *ddl); static void print_core_errors(u32 error_code); static void print_core_recoverable_errors(u32 error_code); void ddl_hw_fatal_cb(struct ddl_client_context *ddl) { struct ddl_context *ddl_context = ddl->ddl_context; u32 error_code = ddl_context->cmd_err_status; DDL_MSG_FATAL("VIDC_HW_FATAL"); ddl->cmd_state = DDL_CMD_INVALID; ddl_context->device_state = DDL_DEVICE_HWFATAL; ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL, VCD_ERR_HW_FATAL, &error_code, sizeof(error_code), (u32 *)ddl, ddl->client_data); ddl_release_command_channel(ddl_context, ddl->command_channel); } static u32 ddl_handle_hw_fatal_errors(struct ddl_client_context *ddl) { struct ddl_context *ddl_context = ddl->ddl_context; u32 status = false, error_code = ddl_context->cmd_err_status; switch (error_code) { case VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER: case VIDC_1080P_ERROR_INVALID_COMMAND_ID: case 
VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE: case VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE: case VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START: case VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED: case VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS: case VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS: case VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED: case VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START: case VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START: case VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START: case VIDC_1080P_ERROR_RESOLUTION_CHANGED: case VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME: case VIDC_1080P_ERROR_INVALID_COMMAND: case VIDC_1080P_ERROR_INVALID_CODEC_TYPE: case VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED: case VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE: case VIDC_1080P_ERROR_DIVIDE_BY_ZERO: case VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE: case VIDC_1080P_ERROR_VSP_NOT_READY: case VIDC_1080P_ERROR_BUFFER_FULL_STATE: ddl_hw_fatal_cb(ddl); status = true; break; default: break; } return status; } void ddl_client_fatal_cb(struct ddl_client_context *ddl) { struct ddl_context *ddl_context = ddl->ddl_context; if (ddl->cmd_state == DDL_CMD_DECODE_FRAME) ddl_vidc_decode_dynamic_property(ddl, false); else if (ddl->cmd_state == DDL_CMD_ENCODE_FRAME) ddl_vidc_encode_dynamic_property(ddl, false); ddl->cmd_state = DDL_CMD_INVALID; DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_FAVIDC_ERROR", ddl_get_state_string(ddl->client_state)); ddl->client_state = DDL_CLIENT_FAVIDC_ERROR; ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL, VCD_ERR_CLIENT_FATAL, NULL, 0, (u32 *)ddl, ddl->client_data); ddl_release_command_channel(ddl_context, ddl->command_channel); } static u32 ddl_handle_client_fatal_errors( struct ddl_client_context *ddl) { struct ddl_context *ddl_context = ddl->ddl_context; u32 status = false; switch (ddl_context->cmd_err_status) { case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE: case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED: case 
VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED: case VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED: case VIDC_1080P_ERROR_INVALID_QP_VALUE: case VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT: case VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL: case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED: case VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT: case VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE: case VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER: case VIDC_1080P_ERROR_NULL_DPB_POINTER: case VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR: case VIDC_1080P_ERROR_NULL_MV_POINTER: status = true; DDL_MSG_ERROR("VIDC_CLIENT_FATAL!!"); break; default: break; } if (!status) DDL_MSG_ERROR("VIDC_UNKNOWN_OP_FAILED %d", ddl_context->cmd_err_status); ddl_client_fatal_cb(ddl); return true; } static void ddl_input_failed_cb(struct ddl_client_context *ddl, u32 vcd_event, u32 vcd_status) { struct ddl_context *ddl_context = ddl->ddl_context; u32 payload_size = sizeof(struct ddl_frame_data_tag); ddl->cmd_state = DDL_CMD_INVALID; if (ddl->decoding) ddl_vidc_decode_dynamic_property(ddl, false); else ddl_vidc_encode_dynamic_property(ddl, false); if (ddl->client_state == DDL_CLIENT_WAIT_FOR_INITCODECDONE) { payload_size = 0; DDL_MSG_LOW("ddl_state_transition: %s ~~> " "DDL_CLIENT_WAIT_FOR_INITCODEC", ddl_get_state_string(ddl->client_state)); ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODEC; } else { DDL_MSG_LOW("ddl_state_transition: %s ~~> " "DDL_CLIENT_WAIT_FOR_FRAME", ddl_get_state_string(ddl->client_state)); ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME; } if (vcd_status == VCD_ERR_IFRAME_EXPECTED) vcd_status = VCD_S_SUCCESS; ddl_context->ddl_callback(vcd_event, vcd_status, &ddl->input_frame, payload_size, (u32 *)ddl, ddl->client_data); } static u32 ddl_handle_core_recoverable_errors( struct ddl_client_context *ddl) { struct ddl_context *ddl_context = ddl->ddl_context; u32 vcd_status = VCD_S_SUCCESS; u32 vcd_event = VCD_EVT_RESP_INPUT_DONE; u32 eos = false, status = false; if (ddl->decoding) { if 
(ddl_handle_dec_seq_hdr_fail_error(ddl)) return true; } if ((ddl->cmd_state != DDL_CMD_DECODE_FRAME) && (ddl->cmd_state != DDL_CMD_ENCODE_FRAME)) return false; if (ddl->decoding && (ddl->codec_data.decoder.field_needed_for_prev_ip == 1)) { ddl->codec_data.decoder.field_needed_for_prev_ip = 0; ddl_release_prev_field(ddl); if (ddl_context->cmd_err_status == VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED) { ddl_vidc_decode_frame_run(ddl); return true; } } switch (ddl_context->cmd_err_status) { case VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED: vcd_status = VCD_ERR_IFRAME_EXPECTED; break; case VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST: { u32 pending_display = 0, release_mask; release_mask = ddl->codec_data.decoder.\ dpb_mask.hw_mask; while (release_mask > 0) { if (release_mask & 0x1) pending_display++; release_mask >>= 1; } if (pending_display >= ddl->codec_data.\ decoder.min_dpb_num) { DDL_MSG_ERROR("VIDC_FW_ISSUE_REQ_BUF"); ddl_client_fatal_cb(ddl); status = true; } else { vcd_event = VCD_EVT_RESP_OUTPUT_REQ; DDL_MSG_LOW("VIDC_OUTPUT_BUF_REQ!!"); } break; } case VIDC_1080P_ERROR_NON_IDR_FRAME_TYPE: case VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST: case VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID: case VIDC_1080P_ERROR_MB_COEFF_NOT_DONE: case VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE: case VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT: case VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR: case VIDC_1080P_ERROR_RESOLUTION_MISMATCH: case VIDC_1080P_ERROR_NV_QUANT_ERR: case VIDC_1080P_ERROR_SYNC_MARKER_ERR: case VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED: case VIDC_1080P_ERROR_MEM_CORRUPTION: case VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME: case VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR: case VIDC_1080P_ERROR_MV_RANGE_ERR: case VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR: case VIDC_1080P_ERROR_SLICE_ADDR_INVALID: case VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED: case VIDC_1080P_ERROR_NALU_HEADER_ERROR: case VIDC_1080P_ERROR_SPS_PARSE_ERROR: case VIDC_1080P_ERROR_PPS_PARSE_ERROR: case 
VIDC_1080P_ERROR_HEADER_NOT_FOUND: case VIDC_1080P_ERROR_SLICE_PARSE_ERROR: case VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED: case VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY: vcd_status = VCD_ERR_BITSTREAM_ERR; DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR (%u)", (u32)ddl_context->cmd_err_status); break; case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED: case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE: case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED: if (ddl->decoding) { vcd_status = VCD_ERR_BITSTREAM_ERR; DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR (%u)", (u32)ddl_context->cmd_err_status); } break; default: break; } if (((vcd_status) || (vcd_event != VCD_EVT_RESP_INPUT_DONE)) && !status) { ddl->input_frame.frm_trans_end = true; eos = ((vcd_event == VCD_EVT_RESP_INPUT_DONE) && (ddl->input_frame.vcd_frm.flags & VCD_FRAME_FLAG_EOS)); if (((ddl->decoding) && (eos)) || !ddl->decoding) ddl->input_frame.frm_trans_end = false; ddl_input_failed_cb(ddl, vcd_event, vcd_status); if (!ddl->decoding) { ddl->output_frame.frm_trans_end = !eos; ddl->output_frame.vcd_frm.data_len = 0; ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_ERR_FAIL, &ddl->output_frame, sizeof(struct ddl_frame_data_tag), (u32 *)ddl, ddl->client_data); if (eos) { DDL_MSG_LOW("VIDC_ENC_EOS_DONE"); ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, VCD_S_SUCCESS, NULL, 0, (u32 *)ddl, ddl->client_data); } } if ((ddl->decoding) && (eos)) ddl_vidc_decode_eos_run(ddl); else ddl_release_command_channel(ddl_context, ddl->command_channel); status = true; } return status; } static u32 ddl_handle_core_warnings(u32 err_status) { u32 status = false; switch (err_status) { case VIDC_1080P_WARN_COMMAND_FLUSHED: case VIDC_1080P_WARN_FRAME_RATE_UNKNOWN: case VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN: case VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN: case VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN: case VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN: case VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR: case VIDC_1080P_WARN_BROKEN_LINK: case VIDC_1080P_WARN_FRAME_CONCEALED: 
case VIDC_1080P_WARN_PROFILE_UNKNOWN: case VIDC_1080P_WARN_LEVEL_UNKNOWN: case VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED: case VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED: case VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER: case VIDC_1080P_WARN_DEBLOCKING_NOT_DONE: case VIDC_1080P_WARN_INCOMPLETE_FRAME: case VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER: case VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT: case VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB: case VIDC_1080P_WARN_METADATA_NO_SPACE_QP: case VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB: case VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM: case VIDC_1080P_WARN_METADATA_NO_SPACE_SEI: case VIDC_1080P_WARN_METADATA_NO_SPACE_VUI: case VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA: case VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE: case VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO: case VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE: case VIDC_1080P_WARN_RESOLUTION_WARNING: case VIDC_1080P_WARN_NO_LONG_TERM_REFERENCE: case VIDC_1080P_WARN_NO_SPACE_MPEG2_DATA_DUMP: case VIDC_1080P_WARN_METADATA_NO_SPACE_MISSING_MB: status = true; DDL_MSG_ERROR("VIDC_WARNING_IGNORED"); break; default: break; } return status; } u32 ddl_handle_core_errors(struct ddl_context *ddl_context) { struct ddl_client_context *ddl; u32 channel_inst_id, status = false; u32 disp_status; if (!ddl_context->cmd_err_status && !ddl_context->disp_pic_err_status) { DDL_MSG_ERROR("VIDC_NO_ERROR"); return false; } vidc_1080p_get_returned_channel_inst_id(&channel_inst_id); vidc_1080p_clear_returned_channel_inst_id(); ddl = ddl_get_current_ddl_client_for_channel_id(ddl_context, ddl_context->response_cmd_ch_id); if (!ddl) { DDL_MSG_ERROR("VIDC_SPURIOUS_INTERRUPT_ERROR"); return true; } if (ddl_context->cmd_err_status) { print_core_errors(ddl_context->cmd_err_status); print_core_recoverable_errors(ddl_context->cmd_err_status); } if (ddl_context->disp_pic_err_status) print_core_errors(ddl_context->disp_pic_err_status); status = 
ddl_handle_core_warnings(ddl_context->cmd_err_status); disp_status = ddl_handle_core_warnings( ddl_context->disp_pic_err_status); if (!status && !disp_status) { DDL_MSG_ERROR("ddl_warning:Unknown"); status = ddl_handle_hw_fatal_errors(ddl); if (!status) status = ddl_handle_core_recoverable_errors(ddl); if (!status) status = ddl_handle_client_fatal_errors(ddl); } return status; } static void ddl_release_prev_field(struct ddl_client_context *ddl) { ddl->output_frame.vcd_frm.ip_frm_tag = ddl->codec_data.decoder.prev_ip_frm_tag; ddl->output_frame.vcd_frm.physical = NULL; ddl->output_frame.vcd_frm.virtual = NULL; ddl->output_frame.frm_trans_end = false; ddl->ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_ERR_INTRLCD_FIELD_DROP, &(ddl->output_frame), sizeof(struct ddl_frame_data_tag), (u32 *) ddl, ddl->client_data); } static u32 ddl_handle_dec_seq_hdr_fail_error(struct ddl_client_context *ddl) { struct ddl_context *ddl_context = ddl->ddl_context; u32 status = false; if ((ddl->cmd_state != DDL_CMD_HEADER_PARSE) || (ddl->client_state != DDL_CLIENT_WAIT_FOR_INITCODECDONE)) { DDL_MSG_ERROR("STATE-CRITICAL-HDDONE"); return false; } switch (ddl_context->cmd_err_status) { case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE: case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED: case VIDC_1080P_ERROR_HEADER_NOT_FOUND: case VIDC_1080P_ERROR_SPS_PARSE_ERROR: case VIDC_1080P_ERROR_PPS_PARSE_ERROR: { struct ddl_decoder_data *decoder = &ddl->codec_data.decoder; if (ddl_context->cmd_err_status == VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE && decoder->codec.codec == VCD_CODEC_H264) { DDL_MSG_ERROR("Unsupported Feature for H264"); ddl_client_fatal_cb(ddl); return true; } if ((ddl_context->cmd_err_status == VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED) && (decoder->codec.codec == VCD_CODEC_H263 || decoder->codec.codec == VCD_CODEC_H264 || decoder->codec.codec == VCD_CODEC_MPEG4 || decoder->codec.codec == VCD_CODEC_VC1 || decoder->codec.codec == VCD_CODEC_VC1_RCV)) { 
DDL_MSG_ERROR("Unsupported resolution"); ddl_client_fatal_cb(ddl); return true; } DDL_MSG_ERROR("SEQHDR-FAILED"); if (decoder->header_in_start) { decoder->header_in_start = false; ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_ERR_SEQHDR_PARSE_FAIL, NULL, 0, (u32 *) ddl, ddl->client_data); } else { ddl->input_frame.frm_trans_end = true; if ((ddl->input_frame.vcd_frm.flags & VCD_FRAME_FLAG_EOS)) { ddl->input_frame.frm_trans_end = false; } ddl_vidc_decode_dynamic_property(ddl, false); ddl_context->ddl_callback( VCD_EVT_RESP_INPUT_DONE, VCD_ERR_SEQHDR_PARSE_FAIL, &ddl->input_frame, sizeof(struct ddl_frame_data_tag), (u32 *)ddl, ddl->client_data); if ((ddl->input_frame.vcd_frm.flags & VCD_FRAME_FLAG_EOS)) { DDL_MSG_HIGH("EOS_DONE-fromDDL"); ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, VCD_S_SUCCESS, NULL, 0, (u32 *) ddl, ddl->client_data); } } DDL_MSG_LOW("ddl_state_transition: %s ~~> " "DDL_CLIENT_WAIT_FOR_INITCODEC", ddl_get_state_string(ddl->client_state)); ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODEC; ddl_release_command_channel(ddl_context, ddl->command_channel); status = true; break; } default: break; } return status; } void print_core_errors(u32 error_code) { s8 *string = NULL; switch (error_code) { case VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER: string = "VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER"; break; case VIDC_1080P_ERROR_INVALID_COMMAND_ID: string = "VIDC_1080P_ERROR_INVALID_COMMAND_ID"; break; case VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE: string = "VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE"; break; case VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE: string = "VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE"; break; case VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START: string = "VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START"; break; case VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED: string = "VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED"; break; case VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS: string = "VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS"; 
break; case VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS: string = "VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS"; break; case VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED: string = "VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED"; break; case VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START: string = "VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START"; break; case VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START: string = "VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START"; break; case VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START: string = "VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START"; break; case VIDC_1080P_ERROR_RESOLUTION_CHANGED: string = "VIDC_1080P_ERROR_RESOLUTION_CHANGED"; break; case VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME: string = "VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME"; break; case VIDC_1080P_ERROR_INVALID_COMMAND: string = "VIDC_1080P_ERROR_INVALID_COMMAND"; break; case VIDC_1080P_ERROR_INVALID_CODEC_TYPE: string = "VIDC_1080P_ERROR_INVALID_CODEC_TYPE"; break; case VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED: string = "VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED"; break; case VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE: string = "VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE"; break; case VIDC_1080P_ERROR_DIVIDE_BY_ZERO: string = "VIDC_1080P_ERROR_DIVIDE_BY_ZERO"; break; case VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY: string = "VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY"; break; case VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE: string = "VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE"; break; case VIDC_1080P_ERROR_VSP_NOT_READY: string = "VIDC_1080P_ERROR_VSP_NOT_READY"; break; case VIDC_1080P_ERROR_BUFFER_FULL_STATE: string = "VIDC_1080P_ERROR_BUFFER_FULL_STATE"; break; case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE: string = "VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE"; break; case VIDC_1080P_ERROR_HEADER_NOT_FOUND: string = "VIDC_1080P_ERROR_HEADER_NOT_FOUND"; break; case VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED: string = "VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED"; break; case 
VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED: string = "VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED"; break; case VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED: string = "VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED"; break; case VIDC_1080P_ERROR_INVALID_QP_VALUE: string = "VIDC_1080P_ERROR_INVALID_QP_VALUE"; break; case VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT: string = "VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT"; break; case VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL: string = "VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL"; break; case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED: string = "VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED"; break; case VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT: string = "VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT"; break; case VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE: string = "VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE"; break; case VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER: string = "VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER"; break; case VIDC_1080P_ERROR_NULL_DPB_POINTER: string = "VIDC_1080P_ERROR_NULL_DPB_POINTER"; break; case VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR: string = "VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR"; break; case VIDC_1080P_ERROR_NULL_MV_POINTER: string = "VIDC_1080P_ERROR_NULL_MV_POINTER"; break; case VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED: string = "VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED"; break; case VIDC_1080P_WARN_COMMAND_FLUSHED: string = "VIDC_1080P_WARN_COMMAND_FLUSHED"; break; case VIDC_1080P_WARN_FRAME_RATE_UNKNOWN: string = "VIDC_1080P_WARN_FRAME_RATE_UNKNOWN"; break; case VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN: string = "VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN"; break; case VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN: string = "VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN"; break; case VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN: string = "VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN"; break; case VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN: string = "VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN"; break; case 
VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR: string = "VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR"; break; case VIDC_1080P_WARN_BROKEN_LINK: string = "VIDC_1080P_WARN_BROKEN_LINK"; break; case VIDC_1080P_WARN_FRAME_CONCEALED: string = "VIDC_1080P_WARN_FRAME_CONCEALED"; break; case VIDC_1080P_WARN_PROFILE_UNKNOWN: string = "VIDC_1080P_WARN_PROFILE_UNKNOWN"; break; case VIDC_1080P_WARN_LEVEL_UNKNOWN: string = "VIDC_1080P_WARN_LEVEL_UNKNOWN"; break; case VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED: string = "VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED"; break; case VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED: string = "VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED"; break; case VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER: string = "VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER"; break; case VIDC_1080P_WARN_DEBLOCKING_NOT_DONE: string = "VIDC_1080P_WARN_DEBLOCKING_NOT_DONE"; break; case VIDC_1080P_WARN_INCOMPLETE_FRAME: string = "VIDC_1080P_WARN_INCOMPLETE_FRAME"; break; case VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER: string = "VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER"; break; case VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT: string = "VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_QP: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_QP"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_SEI: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_SEI"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_VUI: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_VUI"; break; case VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA: string = "VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA"; break; case 
VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE:
		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE";
		break;
	case VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO:
		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO";
		break;
	case VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE:
		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE";
		break;
	case VIDC_1080P_WARN_RESOLUTION_WARNING:
		string = "VIDC_1080P_WARN_RESOLUTION_WARNING";
		break;
	case VIDC_1080P_WARN_NO_LONG_TERM_REFERENCE:
		string = "VIDC_1080P_WARN_NO_LONG_TERM_REFERENCE";
		break;
	case VIDC_1080P_WARN_NO_SPACE_MPEG2_DATA_DUMP:
		string = "VIDC_1080P_WARN_NO_SPACE_MPEG2_DATA_DUMP";
		break;
	case VIDC_1080P_WARN_METADATA_NO_SPACE_MISSING_MB:
		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_MISSING_MB";
		break;
	}
	/* Unmapped codes leave string == NULL and are silently skipped. */
	if (string)
		DDL_MSG_ERROR("Error code = 0x%x : %s", error_code, string);
}

/*
 * print_core_recoverable_errors() - log a symbolic name for a recoverable
 * VIDC 1080p core error code.
 *
 * @error_code: raw error code reported by the video core firmware.
 *
 * Pure diagnostic helper: maps the numeric code to its enum spelling and
 * emits it at low-priority log level via DDL_MSG_LOW. Codes without an
 * entry in the switch are ignored (no log line is produced).
 */
void print_core_recoverable_errors(u32 error_code)
{
	s8 *string = NULL;

	switch (error_code) {
	case VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED:
		string = "VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED";
		break;
	case VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST:
		string = "VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST";
		break;
	case VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST:
		string = "VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST";
		break;
	case VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID:
		string = "VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID";
		break;
	case VIDC_1080P_ERROR_MB_COEFF_NOT_DONE:
		string = "VIDC_1080P_ERROR_MB_COEFF_NOT_DONE";
		break;
	case VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE:
		string = "VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE";
		break;
	case VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT:
		string = "VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT";
		break;
	case VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR:
		string = "VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR";
		break;
	case VIDC_1080P_ERROR_RESOLUTION_MISMATCH:
		string = "VIDC_1080P_ERROR_RESOLUTION_MISMATCH";
		break;
	case VIDC_1080P_ERROR_NV_QUANT_ERR:
		string = "VIDC_1080P_ERROR_NV_QUANT_ERR";
		break;
	case VIDC_1080P_ERROR_SYNC_MARKER_ERR:
		string = "VIDC_1080P_ERROR_SYNC_MARKER_ERR";
		break;
	case VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED:
		string = "VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED";
		break;
	case VIDC_1080P_ERROR_MEM_CORRUPTION:
		string = "VIDC_1080P_ERROR_MEM_CORRUPTION";
		break;
	case VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME:
		string = "VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME";
		break;
	case VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR:
		string = "VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR";
		break;
	case VIDC_1080P_ERROR_MV_RANGE_ERR:
		string = "VIDC_1080P_ERROR_MV_RANGE_ERR";
		break;
	case VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR:
		string = "VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR";
		break;
	case VIDC_1080P_ERROR_SLICE_ADDR_INVALID:
		string = "VIDC_1080P_ERROR_SLICE_ADDR_INVALID";
		break;
	case VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED:
		string = "VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED";
		break;
	case VIDC_1080P_ERROR_NALU_HEADER_ERROR:
		string = "VIDC_1080P_ERROR_NALU_HEADER_ERROR";
		break;
	case VIDC_1080P_ERROR_SPS_PARSE_ERROR:
		string = "VIDC_1080P_ERROR_SPS_PARSE_ERROR";
		break;
	case VIDC_1080P_ERROR_PPS_PARSE_ERROR:
		string = "VIDC_1080P_ERROR_PPS_PARSE_ERROR";
		break;
	case VIDC_1080P_ERROR_SLICE_PARSE_ERROR:
		string = "VIDC_1080P_ERROR_SLICE_PARSE_ERROR";
		break;
	}
	if (string)
		DDL_MSG_LOW("Recoverable Error code = 0x%x : %s",
			error_code, string);
}
gpl-2.0
subtek/axiomk
arch/tile/lib/exports.c
2198
2894
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Exports from assembler code and from libtile-cc.
 *
 * This file only publishes symbols to modules: every EXPORT_SYMBOL()
 * below refers to a function defined in assembly or in libgcc, so the
 * prototypes here exist solely to satisfy the compiler.
 */

#include <linux/module.h>

/* arch/tile/lib/usercopy.S — user-space access primitives */
#include <linux/uaccess.h>
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strnlen_user_asm);
EXPORT_SYMBOL(strncpy_from_user_asm);
EXPORT_SYMBOL(clear_user_asm);
EXPORT_SYMBOL(flush_user_asm);
EXPORT_SYMBOL(inv_user_asm);
EXPORT_SYMBOL(finv_user_asm);

/* arch/tile/kernel/entry.S */
#include <linux/kernel.h>
#include <asm/processor.h>
EXPORT_SYMBOL(current_text_addr);
EXPORT_SYMBOL(dump_stack);

/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic);
EXPORT_SYMBOL(__copy_from_user_inatomic);
EXPORT_SYMBOL(__copy_from_user_zeroing);
#ifdef __tilegx__
EXPORT_SYMBOL(__copy_in_user_inatomic);
#endif

/* hypervisor glue */
#include <hv/hypervisor.h>
EXPORT_SYMBOL(hv_dev_open);
EXPORT_SYMBOL(hv_dev_pread);
EXPORT_SYMBOL(hv_dev_pwrite);
EXPORT_SYMBOL(hv_dev_preada);
EXPORT_SYMBOL(hv_dev_pwritea);
EXPORT_SYMBOL(hv_dev_poll);
EXPORT_SYMBOL(hv_dev_poll_cancel);
EXPORT_SYMBOL(hv_dev_close);
EXPORT_SYMBOL(hv_sysconf);
EXPORT_SYMBOL(hv_confstr);

/* libgcc.a — compiler-runtime integer division/modulo helpers */
uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
EXPORT_SYMBOL(__udivsi3);
int32_t __divsi3(int32_t dividend, int32_t divisor);
EXPORT_SYMBOL(__divsi3);
uint64_t __udivdi3(uint64_t dividend, uint64_t divisor);
EXPORT_SYMBOL(__udivdi3);
int64_t __divdi3(int64_t dividend, int64_t divisor);
EXPORT_SYMBOL(__divdi3);
uint32_t __umodsi3(uint32_t dividend, uint32_t divisor);
EXPORT_SYMBOL(__umodsi3);
int32_t __modsi3(int32_t dividend, int32_t divisor);
EXPORT_SYMBOL(__modsi3);
uint64_t __umoddi3(uint64_t dividend, uint64_t divisor);
EXPORT_SYMBOL(__umoddi3);
int64_t __moddi3(int64_t dividend, int64_t divisor);
EXPORT_SYMBOL(__moddi3);
#ifndef __tilegx__
/* 32-bit tile only: 64-bit multiply and shift helpers from libgcc */
uint64_t __ll_mul(uint64_t n0, uint64_t n1);
EXPORT_SYMBOL(__ll_mul);
int64_t __muldi3(int64_t, int64_t);
EXPORT_SYMBOL(__muldi3);
uint64_t __lshrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__lshrdi3);
uint64_t __ashrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashrdi3);
uint64_t __ashldi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashldi3);
#endif
gpl-2.0
szezso/T.E.S.C.O-kernel_vivo
drivers/net/wireless/iwlwifi/iwl-sv-open.c
2454
19793
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <net/net_namespace.h> #include <linux/netdevice.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <net/netlink.h> #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-debug.h" #include "iwl-fh.h" #include "iwl-io.h" #include "iwl-agn.h" #include "iwl-testmode.h" /* The TLVs used in the gnl message policy between the kernel module and * user space application. iwl_testmode_gnl_msg_policy is to be carried * through the NL80211_CMD_TESTMODE channel regulated by nl80211. 
* See iwl-testmode.h */ static struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_TRACE_DATA] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, }; /* * See the struct iwl_rx_packet in iwl-commands.h for the format of the * received events from the device */ static inline int get_event_length(struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); if (pkt) return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; else return 0; } /* * This function multicasts the spontaneous messages from the device to the * user space. It is invoked whenever there is a received messages * from the device. This function is called within the ISR of the rx handlers * in iwlagn driver. * * The parsing of the message content is left to the user space application, * The message content is treated as unattacked raw data and is encapsulated * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space. * * @priv: the instance of iwlwifi device * @rxb: pointer to rx data content received by the ISR * * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[]. 
* For the messages multicasting to the user application, the mandatory * TLV fields are : * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content */ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct ieee80211_hw *hw = priv->hw; struct sk_buff *skb; void *data; int length; data = (void *)rxb_addr(rxb); length = get_event_length(rxb); if (!data || length == 0) return; skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length, GFP_ATOMIC); if (skb == NULL) { IWL_DEBUG_INFO(priv, "Run out of memory for messages to user space ?\n"); return; } NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data); cfg80211_testmode_event(skb, GFP_ATOMIC); return; nla_put_failure: kfree_skb(skb); IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n"); } void iwl_testmode_init(struct iwl_priv *priv) { priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; priv->testmode_trace.trace_enabled = false; } static void iwl_trace_cleanup(struct iwl_priv *priv) { struct device *dev = &priv->pci_dev->dev; if (priv->testmode_trace.trace_enabled) { if (priv->testmode_trace.cpu_addr && priv->testmode_trace.dma_addr) dma_free_coherent(dev, TRACE_TOTAL_SIZE, priv->testmode_trace.cpu_addr, priv->testmode_trace.dma_addr); priv->testmode_trace.trace_enabled = false; priv->testmode_trace.cpu_addr = NULL; priv->testmode_trace.trace_addr = NULL; priv->testmode_trace.dma_addr = 0; } } void iwl_testmode_cleanup(struct iwl_priv *priv) { iwl_trace_cleanup(priv); } /* * This function handles the user application commands to the ucode. * * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the * host command to the ucode. 
* * If any mandatory field is missing, -ENOMSG is replied to the user space * application; otherwise, the actual execution result of the host command to * ucode is replied. * * @hw: ieee80211_hw object that represents the device * @tb: gnl message fields from the user space */ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = hw->priv; struct iwl_host_cmd cmd; memset(&cmd, 0, sizeof(struct iwl_host_cmd)); if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { IWL_DEBUG_INFO(priv, "Error finding ucode command mandatory fields\n"); return -ENOMSG; } cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," " len %d\n", cmd.id, cmd.flags, cmd.len[0]); /* ok, let's submit the command to ucode */ return iwl_send_cmd(priv, &cmd); } /* * This function handles the user application commands for register access. * * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the * handlers respectively. * * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32, * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating * the success of the command execution. * * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read * value is returned with IWL_TM_ATTR_REG_VALUE32. 
* * @hw: ieee80211_hw object that represents the device * @tb: gnl message fields from the user space */ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = hw->priv; u32 ofs, val32; u8 val8; struct sk_buff *skb; int status = 0; if (!tb[IWL_TM_ATTR_REG_OFFSET]) { IWL_DEBUG_INFO(priv, "Error finding register offset\n"); return -ENOMSG; } ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]); IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs); switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_REG_READ32: val32 = iwl_read32(priv, ofs); IWL_INFO(priv, "32bit value to read 0x%x\n", val32); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); if (!skb) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); return -ENOMEM; } NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); status = cfg80211_testmode_reply(skb); if (status < 0) IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); break; case IWL_TM_CMD_APP2DEV_REG_WRITE32: if (!tb[IWL_TM_ATTR_REG_VALUE32]) { IWL_DEBUG_INFO(priv, "Error finding value to write\n"); return -ENOMSG; } else { val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); IWL_INFO(priv, "32bit value to write 0x%x\n", val32); iwl_write32(priv, ofs, val32); } break; case IWL_TM_CMD_APP2DEV_REG_WRITE8: if (!tb[IWL_TM_ATTR_REG_VALUE8]) { IWL_DEBUG_INFO(priv, "Error finding value to write\n"); return -ENOMSG; } else { val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); IWL_INFO(priv, "8bit value to write 0x%x\n", val8); iwl_write8(priv, ofs, val8); } break; default: IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n"); return -ENOSYS; } return status; nla_put_failure: kfree_skb(skb); return -EMSGSIZE; } static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; int ret; iwlagn_init_notification_wait(priv, &calib_wait, CALIBRATION_COMPLETE_NOTIFICATION, NULL, NULL); ret = iwlagn_init_alive_start(priv); if (ret) { 
IWL_DEBUG_INFO(priv, "Error configuring init calibration: %d\n", ret); goto cfg_init_calib_error; } ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ); if (ret) IWL_DEBUG_INFO(priv, "Error detecting" " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret); return ret; cfg_init_calib_error: iwlagn_remove_notification(priv, &calib_wait); return ret; } /* * This function handles the user application commands for driver. * * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the * handlers respectively. * * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned * value of the actual command execution is replied to the user application. * * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP * is used for carry the message while IWL_TM_ATTR_COMMAND must set to * IWL_TM_CMD_DEV2APP_SYNC_RSP. * * @hw: ieee80211_hw object that represents the device * @tb: gnl message fields from the user space */ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = hw->priv; struct sk_buff *skb; unsigned char *rsp_data_ptr = NULL; int status = 0, rsp_data_len = 0; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: rsp_data_ptr = (unsigned char *)priv->cfg->name; rsp_data_len = strlen(priv->cfg->name); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, rsp_data_len + 20); if (!skb) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); return -ENOMEM; } NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_SYNC_RSP); NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP, rsp_data_len, rsp_data_ptr); status = cfg80211_testmode_reply(skb); if (status < 0) IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); break; case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, UCODE_SUBTYPE_INIT, -1); if (status) IWL_DEBUG_INFO(priv, "Error loading init ucode: %d\n", status); break; case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: 
iwl_testmode_cfg_init_calib(priv); iwlagn_stop_device(priv); break; case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_rt, UCODE_SUBTYPE_REGULAR, UCODE_SUBTYPE_REGULAR_NEW); if (status) { IWL_DEBUG_INFO(priv, "Error loading runtime ucode: %d\n", status); break; } status = iwl_alive_start(priv); if (status) IWL_DEBUG_INFO(priv, "Error starting the device: %d\n", status); break; case IWL_TM_CMD_APP2DEV_GET_EEPROM: if (priv->eeprom) { skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, priv->cfg->base_params->eeprom_size + 20); if (!skb) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); return -ENOMEM; } NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_EEPROM_RSP); NLA_PUT(skb, IWL_TM_ATTR_EEPROM, priv->cfg->base_params->eeprom_size, priv->eeprom); status = cfg80211_testmode_reply(skb); if (status < 0) IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); } else return -EFAULT; break; case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: if (!tb[IWL_TM_ATTR_FIXRATE]) { IWL_DEBUG_INFO(priv, "Error finding fixrate setting\n"); return -ENOMSG; } priv->dbg_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); break; default: IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n"); return -ENOSYS; } return status; nla_put_failure: kfree_skb(skb); return -EMSGSIZE; } /* * This function handles the user application commands for uCode trace * * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the * handlers respectively. * * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned * value of the actual command execution is replied to the user application. 
* * @hw: ieee80211_hw object that represents the device * @tb: gnl message fields from the user space */ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = hw->priv; struct sk_buff *skb; int status = 0; struct device *dev = &priv->pci_dev->dev; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: if (priv->testmode_trace.trace_enabled) return -EBUSY; priv->testmode_trace.cpu_addr = dma_alloc_coherent(dev, TRACE_TOTAL_SIZE, &priv->testmode_trace.dma_addr, GFP_KERNEL); if (!priv->testmode_trace.cpu_addr) return -ENOMEM; priv->testmode_trace.trace_enabled = true; priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN( priv->testmode_trace.cpu_addr, 0x100); memset(priv->testmode_trace.trace_addr, 0x03B, TRACE_BUFF_SIZE); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, sizeof(priv->testmode_trace.dma_addr) + 20); if (!skb) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); iwl_trace_cleanup(priv); return -ENOMEM; } NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR, sizeof(priv->testmode_trace.dma_addr), (u64 *)&priv->testmode_trace.dma_addr); status = cfg80211_testmode_reply(skb); if (status < 0) { IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); } break; case IWL_TM_CMD_APP2DEV_END_TRACE: iwl_trace_cleanup(priv); break; case IWL_TM_CMD_APP2DEV_READ_TRACE: if (priv->testmode_trace.trace_enabled && priv->testmode_trace.trace_addr) { skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + TRACE_BUFF_SIZE); if (skb == NULL) { IWL_DEBUG_INFO(priv, "Error allocating memory\n"); return -ENOMEM; } NLA_PUT(skb, IWL_TM_ATTR_TRACE_DATA, TRACE_BUFF_SIZE, priv->testmode_trace.trace_addr); status = cfg80211_testmode_reply(skb); if (status < 0) { IWL_DEBUG_INFO(priv, "Error sending msg : %d\n", status); } } else return -EFAULT; break; default: IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n"); return -ENOSYS; } return status; nla_put_failure: kfree_skb(skb); if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) 
== IWL_TM_CMD_APP2DEV_BEGIN_TRACE) iwl_trace_cleanup(priv); return -EMSGSIZE; } /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then * invoke the corresponding handlers. * * This function is invoked when there is user space application sending * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated * by nl80211. * * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before * dispatching it to the corresponding handler. * * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application; * -ENOSYS is replied to the user application if the command is unknown; * Otherwise, the command is dispatched to the respective handler. * * @hw: ieee80211_hw object that represents the device * @data: pointer to user space message * @len: length in byte of @data */ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) { struct nlattr *tb[IWL_TM_ATTR_MAX - 1]; struct iwl_priv *priv = hw->priv; int result; result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, iwl_testmode_gnl_msg_policy); if (result != 0) { IWL_DEBUG_INFO(priv, "Error parsing the gnl message : %d\n", result); return result; } /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ if (!tb[IWL_TM_ATTR_COMMAND]) { IWL_DEBUG_INFO(priv, "Error finding testmode command type\n"); return -ENOMSG; } /* in case multiple accesses to the device happens */ mutex_lock(&priv->mutex); switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_UCODE: IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n"); result = iwl_testmode_ucode(hw, tb); break; case IWL_TM_CMD_APP2DEV_REG_READ32: case IWL_TM_CMD_APP2DEV_REG_WRITE32: case IWL_TM_CMD_APP2DEV_REG_WRITE8: IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); result = iwl_testmode_reg(hw, tb); break; case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: case 
IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: case IWL_TM_CMD_APP2DEV_GET_EEPROM: case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); result = iwl_testmode_driver(hw, tb); break; case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: case IWL_TM_CMD_APP2DEV_END_TRACE: case IWL_TM_CMD_APP2DEV_READ_TRACE: IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n"); result = iwl_testmode_trace(hw, tb); break; default: IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); result = -ENOSYS; break; } mutex_unlock(&priv->mutex); return result; }
gpl-2.0
kgp700/Neok-GNexroid-Kernel-JB
drivers/net/ethoc.c
2454
29405
/* * linux/drivers/net/ethoc.c * * Copyright (C) 2007-2008 Avionic Design Development GmbH * Copyright (C) 2008-2009 Avionic Design GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Written by Thierry Reding <thierry.reding@avionic-design.de> */ #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/io.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/of.h> #include <net/ethoc.h> static int buffer_size = 0x8000; /* 32 KBytes */ module_param(buffer_size, int, 0); MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); /* register offsets */ #define MODER 0x00 #define INT_SOURCE 0x04 #define INT_MASK 0x08 #define IPGT 0x0c #define IPGR1 0x10 #define IPGR2 0x14 #define PACKETLEN 0x18 #define COLLCONF 0x1c #define TX_BD_NUM 0x20 #define CTRLMODER 0x24 #define MIIMODER 0x28 #define MIICOMMAND 0x2c #define MIIADDRESS 0x30 #define MIITX_DATA 0x34 #define MIIRX_DATA 0x38 #define MIISTATUS 0x3c #define MAC_ADDR0 0x40 #define MAC_ADDR1 0x44 #define ETH_HASH0 0x48 #define ETH_HASH1 0x4c #define ETH_TXCTRL 0x50 /* mode register */ #define MODER_RXEN (1 << 0) /* receive enable */ #define MODER_TXEN (1 << 1) /* transmit enable */ #define MODER_NOPRE (1 << 2) /* no preamble */ #define MODER_BRO (1 << 3) /* broadcast address */ #define MODER_IAM (1 << 4) /* individual address mode */ #define MODER_PRO (1 << 5) /* promiscuous mode */ #define MODER_IFG (1 << 6) /* interframe gap for incoming frames */ #define MODER_LOOP (1 << 7) /* loopback */ #define MODER_NBO (1 << 8) /* no back-off */ #define MODER_EDE (1 << 9) /* excess defer enable */ #define MODER_FULLD (1 << 10) /* full duplex */ #define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */ #define MODER_DCRC (1 << 12) /* delayed CRC enable */ #define MODER_CRC 
(1 << 13) /* CRC enable */ #define MODER_HUGE (1 << 14) /* huge packets enable */ #define MODER_PAD (1 << 15) /* padding enabled */ #define MODER_RSM (1 << 16) /* receive small packets */ /* interrupt source and mask registers */ #define INT_MASK_TXF (1 << 0) /* transmit frame */ #define INT_MASK_TXE (1 << 1) /* transmit error */ #define INT_MASK_RXF (1 << 2) /* receive frame */ #define INT_MASK_RXE (1 << 3) /* receive error */ #define INT_MASK_BUSY (1 << 4) #define INT_MASK_TXC (1 << 5) /* transmit control frame */ #define INT_MASK_RXC (1 << 6) /* receive control frame */ #define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE) #define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE) #define INT_MASK_ALL ( \ INT_MASK_TXF | INT_MASK_TXE | \ INT_MASK_RXF | INT_MASK_RXE | \ INT_MASK_TXC | INT_MASK_RXC | \ INT_MASK_BUSY \ ) /* packet length register */ #define PACKETLEN_MIN(min) (((min) & 0xffff) << 16) #define PACKETLEN_MAX(max) (((max) & 0xffff) << 0) #define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \ PACKETLEN_MAX(max)) /* transmit buffer number register */ #define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? 
(x) : 0x80) /* control module mode register */ #define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */ #define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */ #define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */ /* MII mode register */ #define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */ #define MIIMODER_NOPRE (1 << 8) /* no preamble */ /* MII command register */ #define MIICOMMAND_SCAN (1 << 0) /* scan status */ #define MIICOMMAND_READ (1 << 1) /* read status */ #define MIICOMMAND_WRITE (1 << 2) /* write control data */ /* MII address register */ #define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0) #define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8) #define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \ MIIADDRESS_RGAD(reg)) /* MII transmit data register */ #define MIITX_DATA_VAL(x) ((x) & 0xffff) /* MII receive data register */ #define MIIRX_DATA_VAL(x) ((x) & 0xffff) /* MII status register */ #define MIISTATUS_LINKFAIL (1 << 0) #define MIISTATUS_BUSY (1 << 1) #define MIISTATUS_INVALID (1 << 2) /* TX buffer descriptor */ #define TX_BD_CS (1 << 0) /* carrier sense lost */ #define TX_BD_DF (1 << 1) /* defer indication */ #define TX_BD_LC (1 << 2) /* late collision */ #define TX_BD_RL (1 << 3) /* retransmission limit */ #define TX_BD_RETRY_MASK (0x00f0) #define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4) #define TX_BD_UR (1 << 8) /* transmitter underrun */ #define TX_BD_CRC (1 << 11) /* TX CRC enable */ #define TX_BD_PAD (1 << 12) /* pad enable for short packets */ #define TX_BD_WRAP (1 << 13) #define TX_BD_IRQ (1 << 14) /* interrupt request enable */ #define TX_BD_READY (1 << 15) /* TX buffer ready */ #define TX_BD_LEN(x) (((x) & 0xffff) << 16) #define TX_BD_LEN_MASK (0xffff << 16) #define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \ TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR) /* RX buffer descriptor */ #define RX_BD_LC (1 << 0) /* late collision */ #define RX_BD_CRC (1 << 1) /* RX CRC error */ #define RX_BD_SF (1 << 2) /* short frame 
*/ #define RX_BD_TL (1 << 3) /* too long */ #define RX_BD_DN (1 << 4) /* dribble nibble */ #define RX_BD_IS (1 << 5) /* invalid symbol */ #define RX_BD_OR (1 << 6) /* receiver overrun */ #define RX_BD_MISS (1 << 7) #define RX_BD_CF (1 << 8) /* control frame */ #define RX_BD_WRAP (1 << 13) #define RX_BD_IRQ (1 << 14) /* interrupt request enable */ #define RX_BD_EMPTY (1 << 15) #define RX_BD_LEN(x) (((x) & 0xffff) << 16) #define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \ RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS) #define ETHOC_BUFSIZ 1536 #define ETHOC_ZLEN 64 #define ETHOC_BD_BASE 0x400 #define ETHOC_TIMEOUT (HZ / 2) #define ETHOC_MII_TIMEOUT (1 + (HZ / 5)) /** * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region * @dma_alloc: dma allocated buffer size * @io_region_size: I/O memory region size * @num_tx: number of send buffers * @cur_tx: last send buffer written * @dty_tx: last buffer actually sent * @num_rx: number of receive buffers * @cur_rx: current receive buffer * @vma: pointer to array of virtual memory addresses for buffers * @netdev: pointer to network device structure * @napi: NAPI structure * @msg_enable: device state flags * @lock: device lock * @phy: attached PHY * @mdio: MDIO bus for PHY access * @phy_id: address of attached PHY */ struct ethoc { void __iomem *iobase; void __iomem *membase; int dma_alloc; resource_size_t io_region_size; unsigned int num_tx; unsigned int cur_tx; unsigned int dty_tx; unsigned int num_rx; unsigned int cur_rx; void** vma; struct net_device *netdev; struct napi_struct napi; u32 msg_enable; spinlock_t lock; struct phy_device *phy; struct mii_bus *mdio; s8 phy_id; }; /** * struct ethoc_bd - buffer descriptor * @stat: buffer statistics * @addr: physical memory address */ struct ethoc_bd { u32 stat; u32 addr; }; static inline u32 ethoc_read(struct ethoc *dev, loff_t offset) { return ioread32(dev->iobase + offset); } static inline 
void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) { iowrite32(data, dev->iobase + offset); } static inline void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); bd->stat = ethoc_read(dev, offset + 0); bd->addr = ethoc_read(dev, offset + 4); } static inline void ethoc_write_bd(struct ethoc *dev, int index, const struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); ethoc_write(dev, offset + 0, bd->stat); ethoc_write(dev, offset + 4, bd->addr); } static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask |= mask; ethoc_write(dev, INT_MASK, imask); } static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask &= ~mask; ethoc_write(dev, INT_MASK, imask); } static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask) { ethoc_write(dev, INT_SOURCE, mask); } static inline void ethoc_enable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode |= MODER_RXEN | MODER_TXEN; ethoc_write(dev, MODER, mode); } static inline void ethoc_disable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode &= ~(MODER_RXEN | MODER_TXEN); ethoc_write(dev, MODER, mode); } static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start) { struct ethoc_bd bd; int i; void* vma; dev->cur_tx = 0; dev->dty_tx = 0; dev->cur_rx = 0; ethoc_write(dev, TX_BD_NUM, dev->num_tx); /* setup transmission buffers */ bd.addr = mem_start; bd.stat = TX_BD_IRQ | TX_BD_CRC; vma = dev->membase; for (i = 0; i < dev->num_tx; i++) { if (i == dev->num_tx - 1) bd.stat |= TX_BD_WRAP; ethoc_write_bd(dev, i, &bd); bd.addr += ETHOC_BUFSIZ; dev->vma[i] = vma; vma += ETHOC_BUFSIZ; } bd.stat = RX_BD_EMPTY | RX_BD_IRQ; for (i = 0; i < dev->num_rx; i++) { if (i == dev->num_rx - 1) bd.stat |= RX_BD_WRAP; ethoc_write_bd(dev, dev->num_tx + i, &bd); bd.addr += 
ETHOC_BUFSIZ; dev->vma[dev->num_tx + i] = vma; vma += ETHOC_BUFSIZ; } return 0; } static int ethoc_reset(struct ethoc *dev) { u32 mode; /* TODO: reset controller? */ ethoc_disable_rx_and_tx(dev); /* TODO: setup registers */ /* enable FCS generation and automatic padding */ mode = ethoc_read(dev, MODER); mode |= MODER_CRC | MODER_PAD; ethoc_write(dev, MODER, mode); /* set full-duplex mode */ mode = ethoc_read(dev, MODER); mode |= MODER_FULLD; ethoc_write(dev, MODER, mode); ethoc_write(dev, IPGT, 0x15); ethoc_ack_irq(dev, INT_MASK_ALL); ethoc_enable_irq(dev, INT_MASK_ALL); ethoc_enable_rx_and_tx(dev); return 0; } static unsigned int ethoc_update_rx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; unsigned int ret = 0; if (bd->stat & RX_BD_TL) { dev_err(&netdev->dev, "RX: frame too long\n"); netdev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_SF) { dev_err(&netdev->dev, "RX: frame too short\n"); netdev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_DN) { dev_err(&netdev->dev, "RX: dribble nibble\n"); netdev->stats.rx_frame_errors++; } if (bd->stat & RX_BD_CRC) { dev_err(&netdev->dev, "RX: wrong CRC\n"); netdev->stats.rx_crc_errors++; ret++; } if (bd->stat & RX_BD_OR) { dev_err(&netdev->dev, "RX: overrun\n"); netdev->stats.rx_over_errors++; ret++; } if (bd->stat & RX_BD_MISS) netdev->stats.rx_missed_errors++; if (bd->stat & RX_BD_LC) { dev_err(&netdev->dev, "RX: late collision\n"); netdev->stats.collisions++; ret++; } return ret; } static int ethoc_rx(struct net_device *dev, int limit) { struct ethoc *priv = netdev_priv(dev); int count; for (count = 0; count < limit; ++count) { unsigned int entry; struct ethoc_bd bd; entry = priv->num_tx + priv->cur_rx; ethoc_read_bd(priv, entry, &bd); if (bd.stat & RX_BD_EMPTY) { ethoc_ack_irq(priv, INT_MASK_RX); /* If packet (interrupt) came in between checking * BD_EMTPY and clearing the interrupt source, then we * risk missing the packet as the RX interrupt won't * 
trigger right away when we reenable it; hence, check * BD_EMTPY here again to make sure there isn't such a * packet waiting for us... */ ethoc_read_bd(priv, entry, &bd); if (bd.stat & RX_BD_EMPTY) break; } if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; struct sk_buff *skb; size -= 4; /* strip the CRC */ skb = netdev_alloc_skb_ip_align(dev, size); if (likely(skb)) { void *src = priv->vma[entry]; memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += size; netif_receive_skb(skb); } else { if (net_ratelimit()) dev_warn(&dev->dev, "low on memory - " "packet dropped\n"); dev->stats.rx_dropped++; break; } } /* clear the buffer descriptor so it can be reused */ bd.stat &= ~RX_BD_STATS; bd.stat |= RX_BD_EMPTY; ethoc_write_bd(priv, entry, &bd); if (++priv->cur_rx == priv->num_rx) priv->cur_rx = 0; } return count; } static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; if (bd->stat & TX_BD_LC) { dev_err(&netdev->dev, "TX: late collision\n"); netdev->stats.tx_window_errors++; } if (bd->stat & TX_BD_RL) { dev_err(&netdev->dev, "TX: retransmit limit\n"); netdev->stats.tx_aborted_errors++; } if (bd->stat & TX_BD_UR) { dev_err(&netdev->dev, "TX: underrun\n"); netdev->stats.tx_fifo_errors++; } if (bd->stat & TX_BD_CS) { dev_err(&netdev->dev, "TX: carrier sense lost\n"); netdev->stats.tx_carrier_errors++; } if (bd->stat & TX_BD_STATS) netdev->stats.tx_errors++; netdev->stats.collisions += (bd->stat >> 4) & 0xf; netdev->stats.tx_bytes += bd->stat >> 16; netdev->stats.tx_packets++; } static int ethoc_tx(struct net_device *dev, int limit) { struct ethoc *priv = netdev_priv(dev); int count; struct ethoc_bd bd; for (count = 0; count < limit; ++count) { unsigned int entry; entry = priv->dty_tx & (priv->num_tx-1); ethoc_read_bd(priv, entry, &bd); if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) { 
ethoc_ack_irq(priv, INT_MASK_TX); /* If interrupt came in between reading in the BD * and clearing the interrupt source, then we risk * missing the event as the TX interrupt won't trigger * right away when we reenable it; hence, check * BD_EMPTY here again to make sure there isn't such an * event pending... */ ethoc_read_bd(priv, entry, &bd); if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) break; } ethoc_update_tx_stats(priv, &bd); priv->dty_tx++; } if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) netif_wake_queue(dev); return count; } static irqreturn_t ethoc_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct ethoc *priv = netdev_priv(dev); u32 pending; u32 mask; /* Figure out what triggered the interrupt... * The tricky bit here is that the interrupt source bits get * set in INT_SOURCE for an event regardless of whether that * event is masked or not. Thus, in order to figure out what * triggered the interrupt, we need to remove the sources * for all events that are currently masked. This behaviour * is not particularly well documented but reasonable... 
*/ mask = ethoc_read(priv, INT_MASK); pending = ethoc_read(priv, INT_SOURCE); pending &= mask; if (unlikely(pending == 0)) { return IRQ_NONE; } ethoc_ack_irq(priv, pending); /* We always handle the dropped packet interrupt */ if (pending & INT_MASK_BUSY) { dev_err(&dev->dev, "packet dropped\n"); dev->stats.rx_dropped++; } /* Handle receive/transmit event by switching to polling */ if (pending & (INT_MASK_TX | INT_MASK_RX)) { ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX); napi_schedule(&priv->napi); } return IRQ_HANDLED; } static int ethoc_get_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; u32 reg; reg = ethoc_read(priv, MAC_ADDR0); mac[2] = (reg >> 24) & 0xff; mac[3] = (reg >> 16) & 0xff; mac[4] = (reg >> 8) & 0xff; mac[5] = (reg >> 0) & 0xff; reg = ethoc_read(priv, MAC_ADDR1); mac[0] = (reg >> 8) & 0xff; mac[1] = (reg >> 0) & 0xff; return 0; } static int ethoc_poll(struct napi_struct *napi, int budget) { struct ethoc *priv = container_of(napi, struct ethoc, napi); int rx_work_done = 0; int tx_work_done = 0; rx_work_done = ethoc_rx(priv->netdev, budget); tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { napi_complete(napi); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } return rx_work_done; } static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) { struct ethoc *priv = bus->priv; int i; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); for (i=0; i < 5; i++) { u32 status = ethoc_read(priv, MIISTATUS); if (!(status & MIISTATUS_BUSY)) { u32 data = ethoc_read(priv, MIIRX_DATA); /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return data; } usleep_range(100,200); } return -EBUSY; } static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct ethoc *priv = bus->priv; int i; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 
ethoc_write(priv, MIITX_DATA, val); ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); for (i=0; i < 5; i++) { u32 stat = ethoc_read(priv, MIISTATUS); if (!(stat & MIISTATUS_BUSY)) { /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return 0; } usleep_range(100,200); } return -EBUSY; } static int ethoc_mdio_reset(struct mii_bus *bus) { return 0; } static void ethoc_mdio_poll(struct net_device *dev) { } static int __devinit ethoc_mdio_probe(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phy; int err; if (priv->phy_id != -1) { phy = priv->mdio->phy_map[priv->phy_id]; } else { phy = phy_find_first(priv->mdio); } if (!phy) { dev_err(&dev->dev, "no PHY found\n"); return -ENXIO; } err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0, PHY_INTERFACE_MODE_GMII); if (err) { dev_err(&dev->dev, "could not attach to PHY\n"); return err; } priv->phy = phy; return 0; } static int ethoc_open(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); int ret; ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; ethoc_init_ring(priv, dev->mem_start); ethoc_reset(priv); if (netif_queue_stopped(dev)) { dev_dbg(&dev->dev, " resuming queue\n"); netif_wake_queue(dev); } else { dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); } phy_start(priv->phy); napi_enable(&priv->napi); if (netif_msg_ifup(priv)) { dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", dev->base_addr, dev->mem_start, dev->mem_end); } return 0; } static int ethoc_stop(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); napi_disable(&priv->napi); if (priv->phy) phy_stop(priv->phy); ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); return 0; } static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ethoc *priv = netdev_priv(dev); struct mii_ioctl_data *mdio = if_mii(ifr); struct phy_device *phy = NULL; if 
(!netif_running(dev)) return -EINVAL; if (cmd != SIOCGMIIPHY) { if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; phy = priv->mdio->phy_map[mdio->phy_id]; if (!phy) return -ENODEV; } else { phy = priv->phy; } return phy_mii_ioctl(phy, ifr, cmd); } static int ethoc_config(struct net_device *dev, struct ifmap *map) { return -ENOSYS; } static int ethoc_set_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); return 0; } static void ethoc_set_multicast_list(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 mode = ethoc_read(priv, MODER); struct netdev_hw_addr *ha; u32 hash[2] = { 0, 0 }; /* set loopback mode if requested */ if (dev->flags & IFF_LOOPBACK) mode |= MODER_LOOP; else mode &= ~MODER_LOOP; /* receive broadcast frames if requested */ if (dev->flags & IFF_BROADCAST) mode &= ~MODER_BRO; else mode |= MODER_BRO; /* enable promiscuous mode if requested */ if (dev->flags & IFF_PROMISC) mode |= MODER_PRO; else mode &= ~MODER_PRO; ethoc_write(priv, MODER, mode); /* receive multicast frames */ if (dev->flags & IFF_ALLMULTI) { hash[0] = 0xffffffff; hash[1] = 0xffffffff; } else { netdev_for_each_mc_addr(ha, dev) { u32 crc = ether_crc(ETH_ALEN, ha->addr); int bit = (crc >> 26) & 0x3f; hash[bit >> 5] |= 1 << (bit & 0x1f); } } ethoc_write(priv, ETH_HASH0, hash[0]); ethoc_write(priv, ETH_HASH1, hash[1]); } static int ethoc_change_mtu(struct net_device *dev, int new_mtu) { return -ENOSYS; } static void ethoc_tx_timeout(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 pending = ethoc_read(priv, INT_SOURCE); if (likely(pending)) ethoc_interrupt(dev->irq, dev); } static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct ethoc_bd bd; unsigned int entry; void 
*dest; if (unlikely(skb->len > ETHOC_BUFSIZ)) { dev->stats.tx_errors++; goto out; } entry = priv->cur_tx % priv->num_tx; spin_lock_irq(&priv->lock); priv->cur_tx++; ethoc_read_bd(priv, entry, &bd); if (unlikely(skb->len < ETHOC_ZLEN)) bd.stat |= TX_BD_PAD; else bd.stat &= ~TX_BD_PAD; dest = priv->vma[entry]; memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); bd.stat |= TX_BD_LEN(skb->len); ethoc_write_bd(priv, entry, &bd); bd.stat |= TX_BD_READY; ethoc_write_bd(priv, entry, &bd); if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { dev_dbg(&dev->dev, "stopping queue\n"); netif_stop_queue(dev); } spin_unlock_irq(&priv->lock); out: dev_kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops ethoc_netdev_ops = { .ndo_open = ethoc_open, .ndo_stop = ethoc_stop, .ndo_do_ioctl = ethoc_ioctl, .ndo_set_config = ethoc_config, .ndo_set_mac_address = ethoc_set_mac_address, .ndo_set_multicast_list = ethoc_set_multicast_list, .ndo_change_mtu = ethoc_change_mtu, .ndo_tx_timeout = ethoc_tx_timeout, .ndo_start_xmit = ethoc_start_xmit, }; /** * ethoc_probe() - initialize OpenCores ethernet MAC * pdev: platform device */ static int __devinit ethoc_probe(struct platform_device *pdev) { struct net_device *netdev = NULL; struct resource *res = NULL; struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; unsigned int phy; int num_bd; int ret = 0; /* allocate networking device */ netdev = alloc_etherdev(sizeof(struct ethoc)); if (!netdev) { dev_err(&pdev->dev, "cannot allocate network device\n"); ret = -ENOMEM; goto out; } SET_NETDEV_DEV(netdev, &pdev->dev); platform_set_drvdata(pdev, netdev); /* obtain I/O memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory space\n"); ret = -ENXIO; goto free; } mmio = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), res->name); if (!mmio) { dev_err(&pdev->dev, "cannot request I/O 
memory space\n"); ret = -ENXIO; goto free; } netdev->base_addr = mmio->start; /* obtain buffer memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), res->name); if (!mem) { dev_err(&pdev->dev, "cannot request memory space\n"); ret = -ENXIO; goto free; } netdev->mem_start = mem->start; netdev->mem_end = mem->end; } /* obtain device IRQ number */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain IRQ\n"); ret = -ENXIO; goto free; } netdev->irq = res->start; /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; priv->dma_alloc = 0; priv->io_region_size = mmio->end - mmio->start + 1; priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; goto error; } if (netdev->mem_end) { priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, resource_size(mem)); if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; goto error; } } else { /* Allocate buffer memory */ priv->membase = dmam_alloc_coherent(&pdev->dev, buffer_size, (void *)&netdev->mem_start, GFP_KERNEL); if (!priv->membase) { dev_err(&pdev->dev, "cannot allocate %dB buffer\n", buffer_size); ret = -ENOMEM; goto error; } netdev->mem_end = netdev->mem_start + buffer_size; priv->dma_alloc = buffer_size; } /* calculate the number of TX/RX buffers, maximum 128 supported */ num_bd = min_t(unsigned int, 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); if (num_bd < 4) { ret = -ENODEV; goto error; } /* num_tx must be a power of two */ priv->num_tx = rounddown_pow_of_two(num_bd >> 1); priv->num_rx = num_bd - priv->num_tx; dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n", priv->num_tx, priv->num_rx); priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), 
GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; goto error; } /* Allow the platform setup code to pass in a MAC address. */ if (pdev->dev.platform_data) { struct ethoc_platform_data *pdata = pdev->dev.platform_data; memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); priv->phy_id = pdata->phy_id; } else { priv->phy_id = -1; #ifdef CONFIG_OF { const uint8_t* mac; mac = of_get_property(pdev->dev.of_node, "local-mac-address", NULL); if (mac) memcpy(netdev->dev_addr, mac, IFHWADDRLEN); } #endif } /* Check that the given MAC address is valid. If it isn't, read the * current MAC from the controller. */ if (!is_valid_ether_addr(netdev->dev_addr)) ethoc_get_mac_address(netdev, netdev->dev_addr); /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ if (!is_valid_ether_addr(netdev->dev_addr)) random_ether_addr(netdev->dev_addr); ethoc_set_mac_address(netdev, netdev->dev_addr); /* register MII bus */ priv->mdio = mdiobus_alloc(); if (!priv->mdio) { ret = -ENOMEM; goto free; } priv->mdio->name = "ethoc-mdio"; snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d", priv->mdio->name, pdev->id); priv->mdio->read = ethoc_mdio_read; priv->mdio->write = ethoc_mdio_write; priv->mdio->reset = ethoc_mdio_reset; priv->mdio->priv = priv; priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!priv->mdio->irq) { ret = -ENOMEM; goto free_mdio; } for (phy = 0; phy < PHY_MAX_ADDR; phy++) priv->mdio->irq[phy] = PHY_POLL; ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); goto free_mdio; } ret = ethoc_mdio_probe(netdev); if (ret) { dev_err(&netdev->dev, "failed to probe MDIO bus\n"); goto error; } ether_setup(netdev); /* setup the net_device structure */ netdev->netdev_ops = &ethoc_netdev_ops; netdev->watchdog_timeo = ETHOC_TIMEOUT; netdev->features |= 0; /* setup NAPI */ netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); spin_lock_init(&priv->lock); ret = register_netdev(netdev); if (ret < 
0) { dev_err(&netdev->dev, "failed to register interface\n"); goto error2; } goto out; error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); free_mdio: kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: free_netdev(netdev); out: return ret; } /** * ethoc_remove() - shutdown OpenCores ethernet MAC * @pdev: platform device */ static int __devexit ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); platform_set_drvdata(pdev, NULL); if (netdev) { netif_napi_del(&priv->napi); phy_disconnect(priv->phy); priv->phy = NULL; if (priv->mdio) { mdiobus_unregister(priv->mdio); kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } unregister_netdev(netdev); free_netdev(netdev); } return 0; } #ifdef CONFIG_PM static int ethoc_suspend(struct platform_device *pdev, pm_message_t state) { return -ENOSYS; } static int ethoc_resume(struct platform_device *pdev) { return -ENOSYS; } #else # define ethoc_suspend NULL # define ethoc_resume NULL #endif static struct of_device_id ethoc_match[] = { { .compatible = "opencores,ethoc", }, {}, }; MODULE_DEVICE_TABLE(of, ethoc_match); static struct platform_driver ethoc_driver = { .probe = ethoc_probe, .remove = __devexit_p(ethoc_remove), .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { .name = "ethoc", .owner = THIS_MODULE, .of_match_table = ethoc_match, }, }; static int __init ethoc_init(void) { return platform_driver_register(&ethoc_driver); } static void __exit ethoc_exit(void) { platform_driver_unregister(&ethoc_driver); } module_init(ethoc_init); module_exit(ethoc_exit); MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Fechinator/FechdaKernel_V500
arch/arm/mach-msm/nohlt.c
2710
1124
/* Copyright (c) 2009, 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * MSM architecture driver to control arm halt behavior */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <asm/system.h> static int set_nohalt(void *data, u64 val) { if (val) disable_hlt(); else enable_hlt(); return 0; } static int get_nohalt(void *data, u64 *val) { *val = (unsigned int)get_hlt(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(nohalt_ops, get_nohalt, set_nohalt, "%llu\n"); static int __init init_hlt_debug(void) { debugfs_create_file("nohlt", 0600, NULL, NULL, &nohalt_ops); return 0; } late_initcall(init_hlt_debug);
gpl-2.0
ezterry/kernel-biff-testing
arch/cris/arch-v10/kernel/process.c
3222
7618
/* * linux/arch/cris/kernel/process.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2000-2002 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * Mikael Starvik (starvik@axis.com) * * This file handles the architecture-dependent parts of process handling.. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/fs.h> #include <arch/svinto.h> #include <linux/init.h> #ifdef CONFIG_ETRAX_GPIO void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */ #endif /* * We use this if we don't have any better * idle routine.. */ void default_idle(void) { #ifdef CONFIG_ETRAX_GPIO etrax_gpio_wake_up_check(); #endif } /* * Free current thread data structures etc.. */ void exit_thread(void) { /* Nothing needs to be done. */ } /* if the watchdog is enabled, we can simply disable interrupts and go * into an eternal loop, and the watchdog will reset the CPU after 0.1s * if on the other hand the watchdog wasn't enabled, we just enable it and wait */ void hard_reset_now (void) { /* * Don't declare this variable elsewhere. We don't want any other * code to know about it than the watchdog handler in entry.S and * this code, implementing hard reset through the watchdog. */ #if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM) extern int cause_of_death; #endif printk("*** HARD RESET ***\n"); local_irq_disable(); #if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM) cause_of_death = 0xbedead; #else /* Since we dont plan to keep on resetting the watchdog, the key can be arbitrary hence three */ *R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) | IO_STATE(R_WATCHDOG, enable, start); #endif while(1) /* waiting for RETRIBUTION! */ ; } /* * Return saved PC of a blocked thread. 
*/ unsigned long thread_saved_pc(struct task_struct *t) { return task_pt_regs(t)->irp; } static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg) { fn(arg); do_exit(-1); /* Should never be called, return bad exit value */ } /* * Create a kernel thread */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); /* Don't use r10 since that is set to 0 in copy_thread */ regs.r11 = (unsigned long)fn; regs.r12 = (unsigned long)arg; regs.irp = (unsigned long)kernel_thread_helper; regs.dccr = 1 << I_DCCR_BITNR; /* Ok, create the new process.. */ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } /* setup the child's kernel stack with a pt_regs and switch_stack on it. * it will be un-nested during _resume and _ret_from_sys_call when the * new thread is scheduled. * * also setup the thread switching structure which is used to keep * thread-specific data during _resumes. * */ asmlinkage void ret_from_fork(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs * childregs; struct switch_stack *swstack; /* put the pt_regs structure at the end of the new kernel stack page and fix it up * remember that the task_struct doubles as the kernel stack for the task */ childregs = task_pt_regs(p); *childregs = *regs; /* struct copy of pt_regs */ p->set_child_tid = p->clear_child_tid = NULL; childregs->r10 = 0; /* child returns 0 after a fork/clone */ /* put the switch stack right below the pt_regs */ swstack = ((struct switch_stack *)childregs) - 1; swstack->r9 = 0; /* parameter to ret_from_sys_call, 0 == dont restart the syscall */ /* we want to return into ret_from_sys_call after the _resume */ swstack->return_ip = (unsigned long) ret_from_fork; /* Will call ret_from_sys_call */ /* fix the user-mode stackpointer */ p->thread.usp = usp; /* and the kernel-mode one */ 
p->thread.ksp = (unsigned long) swstack; #ifdef DEBUG printk("copy_thread: new regs at 0x%p, as shown below:\n", childregs); show_registers(childregs); #endif return 0; } /* * Be aware of the "magic" 7th argument in the four system-calls below. * They need the latest stackframe, which is put as the 7th argument by * entry.S. The previous arguments are dummies or actually used, but need * to be defined to reach the 7th argument. * * N.B.: Another method to get the stackframe is to use current_regs(). But * it returns the latest stack-frame stacked when going from _user mode_ and * some of these (at least sys_clone) are called from kernel-mode sometimes * (for example during kernel_thread, above) and thus cannot use it. Thus, * to be sure not to get any surprises, we use the method for the other calls * as well. */ asmlinkage int sys_fork(long r10, long r11, long r12, long r13, long mof, long srp, struct pt_regs *regs) { return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL); } /* if newusp is 0, we just grab the old usp */ /* FIXME: Is parent_tid/child_tid really third/fourth argument? Update lib? */ asmlinkage int sys_clone(unsigned long newusp, unsigned long flags, int* parent_tid, int* child_tid, long mof, long srp, struct pt_regs *regs) { if (!newusp) newusp = rdusp(); return do_fork(flags, newusp, regs, 0, parent_tid, child_tid); } /* vfork is a system call in i386 because of register-pressure - maybe * we can remove it and handle it in libc but we put it here until then. */ asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp, struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL); } /* * sys_execve() executes a new program. 
*/ asmlinkage int sys_execve(const char *fname, const char *const *argv, const char *const *envp, long r13, long mof, long srp, struct pt_regs *regs) { int error; char *filename; filename = getname(fname); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, argv, envp, regs); putname(filename); out: return error; } unsigned long get_wchan(struct task_struct *p) { #if 0 /* YURGH. TODO. */ unsigned long ebp, esp, eip; unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)p; esp = p->thread.esp; if (!stack_page || esp < stack_page || esp > 8188+stack_page) return 0; /* include/asm-i386/system.h:switch_to() pushes ebp last. */ ebp = *(unsigned long *) esp; do { if (ebp < stack_page || ebp > 8184+stack_page) return 0; eip = *(unsigned long *) (ebp+4); if (!in_sched_functions(eip)) return eip; ebp = *(unsigned long *) ebp; } while (count++ < 16); #endif return 0; } #undef last_sched #undef first_sched void show_regs(struct pt_regs * regs) { unsigned long usp = rdusp(); printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n", regs->irp, regs->srp, regs->dccr, usp, regs->mof ); printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", regs->r0, regs->r1, regs->r2, regs->r3); printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n", regs->r4, regs->r5, regs->r6, regs->r7); printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n", regs->r8, regs->r9, regs->r10, regs->r11); printk("r12: %08lx r13: %08lx oR10: %08lx\n", regs->r12, regs->r13, regs->orig_r10); }
gpl-2.0
junkyde/vikinger
arch/arm/mach-msm/footswitch-pcom.c
3222
7623
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/clk.h> #include <linux/module.h> #include <mach/socinfo.h> #include <mach/proc_comm.h> #include "footswitch.h" /* PCOM power rail IDs */ #define PCOM_FS_GRP 8 #define PCOM_FS_GRP_2D 58 #define PCOM_FS_MDP 14 #define PCOM_FS_MFC 68 #define PCOM_FS_ROTATOR 90 #define PCOM_FS_VFE 41 #define PCOM_FS_VPE 76 #define PCOM_RAIL_MODE_AUTO 0 #define PCOM_RAIL_MODE_MANUAL 1 /** * struct footswitch - Per-footswitch data and state * @rdev: Regulator framework device * @desc: Regulator descriptor * @init_data: Regulator platform data * @pcom_id: Proc-comm ID of the footswitch * @is_enabled: Flag set when footswitch is enabled * @has_ahb_clk: Flag set if footswitched core has an ahb_clk * @has_src_clk: Flag set if footswitched core has a src_clk * @src_clk: Controls the core clock's rate * @core_clk: Clocks the core * @ahb_clk: Clocks the core's register interface * @src_clk_init_rate: Rate to use for src_clk if it has not been set yet * @is_rate_set: Flag set if core_clk's rate has been set */ struct footswitch { struct regulator_dev *rdev; struct regulator_desc desc; struct regulator_init_data init_data; unsigned pcom_id; bool is_enabled; struct clk *src_clk; struct clk *core_clk; struct clk *ahb_clk; const bool has_ahb_clk; const bool has_src_clk; const 
int src_clk_init_rate; bool is_rate_set; }; static inline int set_rail_mode(int pcom_id, int mode) { int rc; rc = msm_proc_comm(PCOM_CLKCTL_RPC_RAIL_CONTROL, &pcom_id, &mode); if (!rc && pcom_id) rc = -EINVAL; return rc; } static inline int set_rail_state(int pcom_id, int state) { int rc; rc = msm_proc_comm(state, &pcom_id, NULL); if (!rc && pcom_id) rc = -EINVAL; return rc; } static int enable_clocks(struct footswitch *fs) { fs->is_rate_set = !!(clk_get_rate(fs->src_clk)); if (!fs->is_rate_set) clk_set_rate(fs->src_clk, fs->src_clk_init_rate); clk_prepare_enable(fs->core_clk); if (fs->ahb_clk) clk_prepare_enable(fs->ahb_clk); return 0; } static void disable_clocks(struct footswitch *fs) { if (fs->ahb_clk) clk_disable_unprepare(fs->ahb_clk); clk_disable_unprepare(fs->core_clk); } static int footswitch_is_enabled(struct regulator_dev *rdev) { struct footswitch *fs = rdev_get_drvdata(rdev); return fs->is_enabled; } static int footswitch_enable(struct regulator_dev *rdev) { struct footswitch *fs = rdev_get_drvdata(rdev); int rc; rc = enable_clocks(fs); if (rc) return rc; rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE); if (!rc) fs->is_enabled = true; disable_clocks(fs); return rc; } static int footswitch_disable(struct regulator_dev *rdev) { struct footswitch *fs = rdev_get_drvdata(rdev); int rc; rc = enable_clocks(fs); if (rc) return rc; rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_DISABLE); if (!rc) fs->is_enabled = false; disable_clocks(fs); return rc; } static struct regulator_ops footswitch_ops = { .is_enabled = footswitch_is_enabled, .enable = footswitch_enable, .disable = footswitch_disable, }; #define FOOTSWITCH(_id, _pcom_id, _name, _src_clk, _rate, _ahb_clk) \ [_id] = { \ .desc = { \ .id = _id, \ .name = _name, \ .ops = &footswitch_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ .pcom_id = _pcom_id, \ .has_src_clk = _src_clk, \ .src_clk_init_rate = _rate, \ .has_ahb_clk = _ahb_clk, \ } static struct footswitch 
footswitches[] = { FOOTSWITCH(FS_GFX3D, PCOM_FS_GRP, "fs_gfx3d", true, 24576000, true), FOOTSWITCH(FS_GFX2D0, PCOM_FS_GRP_2D, "fs_gfx2d0", false, 24576000, true), FOOTSWITCH(FS_MDP, PCOM_FS_MDP, "fs_mdp", false, 24576000, true), FOOTSWITCH(FS_MFC, PCOM_FS_MFC, "fs_mfc", false, 24576000, true), FOOTSWITCH(FS_ROT, PCOM_FS_ROTATOR, "fs_rot", false, 0, true), FOOTSWITCH(FS_VFE, PCOM_FS_VFE, "fs_vfe", false, 24576000, true), FOOTSWITCH(FS_VPE, PCOM_FS_VPE, "fs_vpe", false, 24576000, false), }; static int get_clocks(struct device *dev, struct footswitch *fs) { int rc; /* * Some SoCs may not have a separate rate-settable clock. * If one can't be found, try to use the core clock for * rate-setting instead. */ if (fs->has_src_clk) { fs->src_clk = clk_get(dev, "src_clk"); if (IS_ERR(fs->src_clk)) fs->src_clk = clk_get(dev, "core_clk"); } else { fs->src_clk = clk_get(dev, "core_clk"); } if (IS_ERR(fs->src_clk)) { pr_err("%s clk_get(src_clk) failed\n", fs->desc.name); rc = PTR_ERR(fs->src_clk); goto err_src_clk; } fs->core_clk = clk_get(dev, "core_clk"); if (IS_ERR(fs->core_clk)) { pr_err("%s clk_get(core_clk) failed\n", fs->desc.name); rc = PTR_ERR(fs->core_clk); goto err_core_clk; } if (fs->has_ahb_clk) { fs->ahb_clk = clk_get(dev, "iface_clk"); if (IS_ERR(fs->ahb_clk)) { pr_err("%s clk_get(iface_clk) failed\n", fs->desc.name); rc = PTR_ERR(fs->ahb_clk); goto err_ahb_clk; } } return 0; err_ahb_clk: clk_put(fs->core_clk); err_core_clk: clk_put(fs->src_clk); err_src_clk: return rc; } static void put_clocks(struct footswitch *fs) { clk_put(fs->src_clk); clk_put(fs->core_clk); clk_put(fs->ahb_clk); } static int footswitch_probe(struct platform_device *pdev) { struct footswitch *fs; struct regulator_init_data *init_data; int rc; if (pdev == NULL) return -EINVAL; if (pdev->id >= MAX_FS) return -ENODEV; init_data = pdev->dev.platform_data; fs = &footswitches[pdev->id]; /* * Enable footswitch in manual mode (ie. not controlled along * with pcom clocks). 
*/ rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE); if (rc) return rc; rc = set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_MANUAL); if (rc) return rc; rc = get_clocks(&pdev->dev, fs); if (rc) return rc; fs->rdev = regulator_register(&fs->desc, &pdev->dev, init_data, fs, NULL); if (IS_ERR(fs->rdev)) { pr_err("regulator_register(%s) failed\n", fs->desc.name); rc = PTR_ERR(fs->rdev); goto err_register; } return 0; err_register: put_clocks(fs); return rc; } static int __devexit footswitch_remove(struct platform_device *pdev) { struct footswitch *fs = &footswitches[pdev->id]; regulator_unregister(fs->rdev); set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_AUTO); put_clocks(fs); return 0; } static struct platform_driver footswitch_driver = { .probe = footswitch_probe, .remove = __devexit_p(footswitch_remove), .driver = { .name = "footswitch-pcom", .owner = THIS_MODULE, }, }; static int __init footswitch_init(void) { return platform_driver_register(&footswitch_driver); } subsys_initcall(footswitch_init); static void __exit footswitch_exit(void) { platform_driver_unregister(&footswitch_driver); } module_exit(footswitch_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("proc_comm rail footswitch"); MODULE_ALIAS("platform:footswitch-pcom");
gpl-2.0
WZeke/m2_kernel
arch/blackfin/kernel/kgdb.c
7574
12758
/* * arch/blackfin/kernel/kgdb.c - Blackfin kgdb pieces * * Copyright 2005-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/ptrace.h> /* for linux pt_regs struct */ #include <linux/kgdb.h> #include <linux/uaccess.h> void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { gdb_regs[BFIN_R0] = regs->r0; gdb_regs[BFIN_R1] = regs->r1; gdb_regs[BFIN_R2] = regs->r2; gdb_regs[BFIN_R3] = regs->r3; gdb_regs[BFIN_R4] = regs->r4; gdb_regs[BFIN_R5] = regs->r5; gdb_regs[BFIN_R6] = regs->r6; gdb_regs[BFIN_R7] = regs->r7; gdb_regs[BFIN_P0] = regs->p0; gdb_regs[BFIN_P1] = regs->p1; gdb_regs[BFIN_P2] = regs->p2; gdb_regs[BFIN_P3] = regs->p3; gdb_regs[BFIN_P4] = regs->p4; gdb_regs[BFIN_P5] = regs->p5; gdb_regs[BFIN_SP] = regs->reserved; gdb_regs[BFIN_FP] = regs->fp; gdb_regs[BFIN_I0] = regs->i0; gdb_regs[BFIN_I1] = regs->i1; gdb_regs[BFIN_I2] = regs->i2; gdb_regs[BFIN_I3] = regs->i3; gdb_regs[BFIN_M0] = regs->m0; gdb_regs[BFIN_M1] = regs->m1; gdb_regs[BFIN_M2] = regs->m2; gdb_regs[BFIN_M3] = regs->m3; gdb_regs[BFIN_B0] = regs->b0; gdb_regs[BFIN_B1] = regs->b1; gdb_regs[BFIN_B2] = regs->b2; gdb_regs[BFIN_B3] = regs->b3; gdb_regs[BFIN_L0] = regs->l0; gdb_regs[BFIN_L1] = regs->l1; gdb_regs[BFIN_L2] = regs->l2; gdb_regs[BFIN_L3] = regs->l3; gdb_regs[BFIN_A0_DOT_X] = regs->a0x; gdb_regs[BFIN_A0_DOT_W] = regs->a0w; gdb_regs[BFIN_A1_DOT_X] = regs->a1x; gdb_regs[BFIN_A1_DOT_W] = regs->a1w; gdb_regs[BFIN_ASTAT] = regs->astat; gdb_regs[BFIN_RETS] = regs->rets; gdb_regs[BFIN_LC0] = regs->lc0; gdb_regs[BFIN_LT0] = regs->lt0; gdb_regs[BFIN_LB0] = regs->lb0; gdb_regs[BFIN_LC1] = regs->lc1; gdb_regs[BFIN_LT1] = regs->lt1; gdb_regs[BFIN_LB1] = regs->lb1; gdb_regs[BFIN_CYCLES] = 0; gdb_regs[BFIN_CYCLES2] = 0; gdb_regs[BFIN_USP] = regs->usp; gdb_regs[BFIN_SEQSTAT] = regs->seqstat; gdb_regs[BFIN_SYSCFG] = regs->syscfg; gdb_regs[BFIN_RETI] = regs->pc; gdb_regs[BFIN_RETX] = regs->retx; gdb_regs[BFIN_RETN] = regs->retn; gdb_regs[BFIN_RETE] = regs->rete; 
gdb_regs[BFIN_PC] = regs->pc; gdb_regs[BFIN_CC] = (regs->astat >> 5) & 1; gdb_regs[BFIN_EXTRA1] = 0; gdb_regs[BFIN_EXTRA2] = 0; gdb_regs[BFIN_EXTRA3] = 0; gdb_regs[BFIN_IPEND] = regs->ipend; } /* * Extracts ebp, esp and eip values understandable by gdb from the values * saved by switch_to. * thread.esp points to ebp. flags and ebp are pushed in switch_to hence esp * prior to entering switch_to is 8 greater than the value that is saved. * If switch_to changes, change following code appropriately. */ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { gdb_regs[BFIN_SP] = p->thread.ksp; gdb_regs[BFIN_PC] = p->thread.pc; gdb_regs[BFIN_SEQSTAT] = p->thread.seqstat; } void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) { regs->r0 = gdb_regs[BFIN_R0]; regs->r1 = gdb_regs[BFIN_R1]; regs->r2 = gdb_regs[BFIN_R2]; regs->r3 = gdb_regs[BFIN_R3]; regs->r4 = gdb_regs[BFIN_R4]; regs->r5 = gdb_regs[BFIN_R5]; regs->r6 = gdb_regs[BFIN_R6]; regs->r7 = gdb_regs[BFIN_R7]; regs->p0 = gdb_regs[BFIN_P0]; regs->p1 = gdb_regs[BFIN_P1]; regs->p2 = gdb_regs[BFIN_P2]; regs->p3 = gdb_regs[BFIN_P3]; regs->p4 = gdb_regs[BFIN_P4]; regs->p5 = gdb_regs[BFIN_P5]; regs->fp = gdb_regs[BFIN_FP]; regs->i0 = gdb_regs[BFIN_I0]; regs->i1 = gdb_regs[BFIN_I1]; regs->i2 = gdb_regs[BFIN_I2]; regs->i3 = gdb_regs[BFIN_I3]; regs->m0 = gdb_regs[BFIN_M0]; regs->m1 = gdb_regs[BFIN_M1]; regs->m2 = gdb_regs[BFIN_M2]; regs->m3 = gdb_regs[BFIN_M3]; regs->b0 = gdb_regs[BFIN_B0]; regs->b1 = gdb_regs[BFIN_B1]; regs->b2 = gdb_regs[BFIN_B2]; regs->b3 = gdb_regs[BFIN_B3]; regs->l0 = gdb_regs[BFIN_L0]; regs->l1 = gdb_regs[BFIN_L1]; regs->l2 = gdb_regs[BFIN_L2]; regs->l3 = gdb_regs[BFIN_L3]; regs->a0x = gdb_regs[BFIN_A0_DOT_X]; regs->a0w = gdb_regs[BFIN_A0_DOT_W]; regs->a1x = gdb_regs[BFIN_A1_DOT_X]; regs->a1w = gdb_regs[BFIN_A1_DOT_W]; regs->rets = gdb_regs[BFIN_RETS]; regs->lc0 = gdb_regs[BFIN_LC0]; regs->lt0 = gdb_regs[BFIN_LT0]; regs->lb0 = gdb_regs[BFIN_LB0]; regs->lc1 = 
gdb_regs[BFIN_LC1]; regs->lt1 = gdb_regs[BFIN_LT1]; regs->lb1 = gdb_regs[BFIN_LB1]; regs->usp = gdb_regs[BFIN_USP]; regs->syscfg = gdb_regs[BFIN_SYSCFG]; regs->retx = gdb_regs[BFIN_RETX]; regs->retn = gdb_regs[BFIN_RETN]; regs->rete = gdb_regs[BFIN_RETE]; regs->pc = gdb_regs[BFIN_PC]; #if 0 /* can't change these */ regs->astat = gdb_regs[BFIN_ASTAT]; regs->seqstat = gdb_regs[BFIN_SEQSTAT]; regs->ipend = gdb_regs[BFIN_IPEND]; #endif } static struct hw_breakpoint { unsigned int occupied:1; unsigned int skip:1; unsigned int enabled:1; unsigned int type:1; unsigned int dataacc:2; unsigned short count; unsigned int addr; } breakinfo[HW_WATCHPOINT_NUM]; static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) { int breakno; int bfin_type; int dataacc = 0; switch (type) { case BP_HARDWARE_BREAKPOINT: bfin_type = TYPE_INST_WATCHPOINT; break; case BP_WRITE_WATCHPOINT: dataacc = 1; bfin_type = TYPE_DATA_WATCHPOINT; break; case BP_READ_WATCHPOINT: dataacc = 2; bfin_type = TYPE_DATA_WATCHPOINT; break; case BP_ACCESS_WATCHPOINT: dataacc = 3; bfin_type = TYPE_DATA_WATCHPOINT; break; default: return -ENOSPC; } /* Because hardware data watchpoint impelemented in current * Blackfin can not trigger an exception event as the hardware * instrction watchpoint does, we ignaore all data watch point here. * They can be turned on easily after future blackfin design * supports this feature. 
*/ for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++) if (bfin_type == breakinfo[breakno].type && !breakinfo[breakno].occupied) { breakinfo[breakno].occupied = 1; breakinfo[breakno].skip = 0; breakinfo[breakno].enabled = 1; breakinfo[breakno].addr = addr; breakinfo[breakno].dataacc = dataacc; breakinfo[breakno].count = 0; return 0; } return -ENOSPC; } static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) { int breakno; int bfin_type; switch (type) { case BP_HARDWARE_BREAKPOINT: bfin_type = TYPE_INST_WATCHPOINT; break; case BP_WRITE_WATCHPOINT: case BP_READ_WATCHPOINT: case BP_ACCESS_WATCHPOINT: bfin_type = TYPE_DATA_WATCHPOINT; break; default: return 0; } for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++) if (bfin_type == breakinfo[breakno].type && breakinfo[breakno].occupied && breakinfo[breakno].addr == addr) { breakinfo[breakno].occupied = 0; breakinfo[breakno].enabled = 0; } return 0; } static void bfin_remove_all_hw_break(void) { int breakno; memset(breakinfo, 0, sizeof(struct hw_breakpoint)*HW_WATCHPOINT_NUM); for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++) breakinfo[breakno].type = TYPE_INST_WATCHPOINT; for (; breakno < HW_WATCHPOINT_NUM; breakno++) breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; } static void bfin_correct_hw_break(void) { int breakno; unsigned int wpiactl = 0; unsigned int wpdactl = 0; int enable_wp = 0; for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++) if (breakinfo[breakno].enabled) { enable_wp = 1; switch (breakno) { case 0: wpiactl |= WPIAEN0|WPICNTEN0; bfin_write_WPIA0(breakinfo[breakno].addr); bfin_write_WPIACNT0(breakinfo[breakno].count + breakinfo->skip); break; case 1: wpiactl |= WPIAEN1|WPICNTEN1; bfin_write_WPIA1(breakinfo[breakno].addr); bfin_write_WPIACNT1(breakinfo[breakno].count + breakinfo->skip); break; case 2: wpiactl |= WPIAEN2|WPICNTEN2; bfin_write_WPIA2(breakinfo[breakno].addr); bfin_write_WPIACNT2(breakinfo[breakno].count + breakinfo->skip); break; 
case 3: wpiactl |= WPIAEN3|WPICNTEN3; bfin_write_WPIA3(breakinfo[breakno].addr); bfin_write_WPIACNT3(breakinfo[breakno].count + breakinfo->skip); break; case 4: wpiactl |= WPIAEN4|WPICNTEN4; bfin_write_WPIA4(breakinfo[breakno].addr); bfin_write_WPIACNT4(breakinfo[breakno].count + breakinfo->skip); break; case 5: wpiactl |= WPIAEN5|WPICNTEN5; bfin_write_WPIA5(breakinfo[breakno].addr); bfin_write_WPIACNT5(breakinfo[breakno].count + breakinfo->skip); break; case 6: wpdactl |= WPDAEN0|WPDCNTEN0|WPDSRC0; wpdactl |= breakinfo[breakno].dataacc << WPDACC0_OFFSET; bfin_write_WPDA0(breakinfo[breakno].addr); bfin_write_WPDACNT0(breakinfo[breakno].count + breakinfo->skip); break; case 7: wpdactl |= WPDAEN1|WPDCNTEN1|WPDSRC1; wpdactl |= breakinfo[breakno].dataacc << WPDACC1_OFFSET; bfin_write_WPDA1(breakinfo[breakno].addr); bfin_write_WPDACNT1(breakinfo[breakno].count + breakinfo->skip); break; } } /* Should enable WPPWR bit first before set any other * WPIACTL and WPDACTL bits */ if (enable_wp) { bfin_write_WPIACTL(WPPWR); CSYNC(); bfin_write_WPIACTL(wpiactl|WPPWR); bfin_write_WPDACTL(wpdactl); CSYNC(); } } static void bfin_disable_hw_debug(struct pt_regs *regs) { /* Disable hardware debugging while we are in kgdb */ bfin_write_WPIACTL(0); bfin_write_WPDACTL(0); CSYNC(); } #ifdef CONFIG_SMP void kgdb_passive_cpu_callback(void *info) { kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); } void kgdb_roundup_cpus(unsigned long flags) { smp_call_function(kgdb_passive_cpu_callback, NULL, 0); } void kgdb_roundup_cpu(int cpu, unsigned long flags) { smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0); } #endif #ifdef CONFIG_IPIPE static unsigned long kgdb_arch_imask; #endif void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code) { if (kgdb_single_step) preempt_enable(); #ifdef CONFIG_IPIPE if (kgdb_arch_imask) { cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask; kgdb_arch_imask = 0; } #endif } int kgdb_arch_handle_exception(int 
vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, struct pt_regs *regs) { long addr; char *ptr; int newPC; int i; switch (remcom_in_buffer[0]) { case 'c': case 's': if (kgdb_contthread && kgdb_contthread != current) { strcpy(remcom_out_buffer, "E00"); break; } kgdb_contthread = NULL; /* try to read optional parameter, pc unchanged if no parm */ ptr = &remcom_in_buffer[1]; if (kgdb_hex2long(&ptr, &addr)) { regs->retx = addr; } newPC = regs->retx; /* clear the trace bit */ regs->syscfg &= 0xfffffffe; /* set the trace bit if we're stepping */ if (remcom_in_buffer[0] == 's') { regs->syscfg |= 0x1; kgdb_single_step = regs->ipend; kgdb_single_step >>= 6; for (i = 10; i > 0; i--, kgdb_single_step >>= 1) if (kgdb_single_step & 1) break; /* i indicate event priority of current stopped instruction * user space instruction is 0, IVG15 is 1, IVTMR is 10. * kgdb_single_step > 0 means in single step mode */ kgdb_single_step = i + 1; preempt_disable(); #ifdef CONFIG_IPIPE kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask; cpu_pda[raw_smp_processor_id()].ex_imask = 0; #endif } bfin_correct_hw_break(); return 0; } /* switch */ return -1; /* this means that we do not want to exit from the handler */ } struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0xa1}, .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = bfin_set_hw_break, .remove_hw_breakpoint = bfin_remove_hw_break, .disable_hw_break = bfin_disable_hw_debug, .remove_all_hw_break = bfin_remove_all_hw_break, .correct_hw_break = bfin_correct_hw_break, }; #define IN_MEM(addr, size, l1_addr, l1_size) \ ({ \ unsigned long __addr = (unsigned long)(addr); \ (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \ }) #define ASYNC_BANK_SIZE \ (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) int kgdb_validate_break_address(unsigned long addr) { int cpu = raw_smp_processor_id(); if (addr >= 0x1000 && (addr + BREAK_INSTR_SIZE) <= physical_mem_end) 
return 0; if (IN_MEM(addr, BREAK_INSTR_SIZE, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE)) return 0; if (cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) return 0; #ifdef CONFIG_SMP else if (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH)) return 0; #endif if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH)) return 0; return -EFAULT; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->retx = ip; } int kgdb_arch_init(void) { kgdb_single_step = 0; #ifdef CONFIG_IPIPE kgdb_arch_imask = 0; #endif bfin_remove_all_hw_break(); return 0; } void kgdb_arch_exit(void) { }
gpl-2.0
Loller79/Solid_Kernel-STOCK-KK-CAF
drivers/hid/hid-gyration.c
8086
3121
/* * HID driver for some gyration "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2006-2008 Jiri Kosina */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #define gy_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { /* Reported on Gyration MCE Remote */ case 0x00d: gy_map_key_clear(KEY_HOME); break; case 0x024: gy_map_key_clear(KEY_DVD); break; case 0x025: gy_map_key_clear(KEY_PVR); break; case 0x046: gy_map_key_clear(KEY_MEDIA); break; case 0x047: gy_map_key_clear(KEY_MP3); break; case 0x048: gy_map_key_clear(KEY_MEDIA); break; case 0x049: gy_map_key_clear(KEY_CAMERA); break; case 0x04a: gy_map_key_clear(KEY_VIDEO); break; case 0x05a: gy_map_key_clear(KEY_TEXT); break; case 0x05b: gy_map_key_clear(KEY_RED); break; case 0x05c: gy_map_key_clear(KEY_GREEN); break; case 0x05d: gy_map_key_clear(KEY_YELLOW); break; case 0x05e: gy_map_key_clear(KEY_BLUE); break; default: return 0; } return 1; } static int gyration_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput) return 0; if ((usage->hid & HID_USAGE_PAGE) 
== HID_UP_GENDESK && (usage->hid & 0xff) == 0x82) { struct input_dev *input = field->hidinput->input; input_event(input, usage->type, usage->code, 1); input_sync(input); input_event(input, usage->type, usage->code, 0); input_sync(input); return 1; } return 0; } static const struct hid_device_id gyration_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { } }; MODULE_DEVICE_TABLE(hid, gyration_devices); static struct hid_driver gyration_driver = { .name = "gyration", .id_table = gyration_devices, .input_mapping = gyration_input_mapping, .event = gyration_event, }; static int __init gyration_init(void) { return hid_register_driver(&gyration_driver); } static void __exit gyration_exit(void) { hid_unregister_driver(&gyration_driver); } module_init(gyration_init); module_exit(gyration_exit); MODULE_LICENSE("GPL");
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_s3ve3g
drivers/s390/char/ctrlchar.c
8342
1685
/* * drivers/s390/char/ctrlchar.c * Unified handling of special chars. * * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> * */ #include <linux/stddef.h> #include <asm/errno.h> #include <linux/sysrq.h> #include <linux/ctype.h> #include "ctrlchar.h" #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; static void ctrlchar_handle_sysrq(struct work_struct *work) { handle_sysrq(ctrlchar_sysrq_key); } static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); #endif /** * Check for special chars at start of input. * * @param buf Console input buffer. * @param len Length of valid data in buffer. * @param tty The tty struct for this console. * @return CTRLCHAR_NONE, if nothing matched, * CTRLCHAR_SYSRQ, if sysrq was encountered * otherwise char to be inserted logically or'ed * with CTRLCHAR_CTRL */ unsigned int ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) { if ((len < 2) || (len > 3)) return CTRLCHAR_NONE; /* hat is 0xb1 in codepage 037 (US etc.) and thus */ /* converted to 0x5e in ascii ('^') */ if ((buf[0] != '^') && (buf[0] != '\252')) return CTRLCHAR_NONE; #ifdef CONFIG_MAGIC_SYSRQ /* racy */ if (len == 3 && buf[1] == '-') { ctrlchar_sysrq_key = buf[2]; schedule_work(&ctrlchar_work); return CTRLCHAR_SYSRQ; } #endif if (len != 2) return CTRLCHAR_NONE; switch (tolower(buf[1])) { case 'c': return INTR_CHAR(tty) | CTRLCHAR_CTRL; case 'd': return EOF_CHAR(tty) | CTRLCHAR_CTRL; case 'z': return SUSP_CHAR(tty) | CTRLCHAR_CTRL; } return CTRLCHAR_NONE; }
gpl-2.0
stas2z/linux-3.10-witi
drivers/media/pci/ttpci/budget-core.c
8598
17325
/* * budget-core.c: driver for the SAA7146 based Budget DVB cards * * Compiled from various sources by Michael Hunold <michael@mihu.de> * * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de> * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * 26feb2004 Support for FS Activy Card (Grundig tuner) by * Michael Dreher <michael@5dot1.de>, * Oliver Endriss <o.endriss@gmx.de>, * Andreas 'randy' Weinberger * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 *
 * the project's page is at http://www.linuxtv.org/
 */

#include "budget.h"
#include "ttpci-eeprom.h"

/* DMA window geometry per card family: line widths in bytes, buffer sizes in KB. */
#define TS_WIDTH (2 * TS_SIZE)
#define TS_WIDTH_ACTIVY TS_SIZE
#define TS_WIDTH_DVBC TS_SIZE
#define TS_HEIGHT_MASK 0xf00
#define TS_HEIGHT_MASK_ACTIVY 0xc00
#define TS_HEIGHT_MASK_DVBC 0xe00
#define TS_MIN_BUFSIZE_K 188
#define TS_MAX_BUFSIZE_K 1410
#define TS_MAX_BUFSIZE_K_ACTIVY 564
#define TS_MAX_BUFSIZE_K_DVBC 1316
/* Minimum interval between "buffer nearly full" warning printks. */
#define BUFFER_WARNING_WAIT (30*HZ)

int budget_debug;
static int dma_buffer_size = TS_MIN_BUFSIZE_K;
module_param_named(debug, budget_debug, int, 0644);
module_param_named(bufsize, dma_buffer_size, int, 0444);
MODULE_PARM_DESC(debug, "Turn on/off budget debugging (default:off).");
MODULE_PARM_DESC(bufsize, "DMA buffer size in KB, default: 188, min: 188, max: 1410 (Activy: 564)");

/****************************************************************************
 * TT budget / WinTV Nova
 ****************************************************************************/

/* Stop TS DMA (DMA3) and mask its interrupt. Callers hold feedlock. */
static int stop_ts_capture(struct budget *budget)
{
	dprintk(2, "budget: %p\n", budget);

	saa7146_write(budget->dev, MC1, MASK_20);	// DMA3 off
	SAA7146_IER_DISABLE(budget->dev, MASK_10);
	return 0;
}

/*
 * (Re)program and start TS DMA (DMA3). No-op unless at least one feed is
 * active and the frontend reports lock. Callers hold feedlock.
 */
static int start_ts_capture(struct budget *budget)
{
	struct saa7146_dev *dev = budget->dev;

	dprintk(2, "budget: %p\n", budget);

	if (!budget->feeding || !budget->fe_synced)
		return 0;

	saa7146_write(dev, MC1, MASK_20);	// DMA3 off

	memset(budget->grabbing, 0x00, budget->buffer_size);

	saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000));

	budget->ttbp = 0;

	/*
	 *  Signal path on the Activy:
	 *
	 *  tuner -> SAA7146 port A -> SAA7146 BRS -> SAA7146 DMA3 -> memory
	 *
	 *  Since the tuner feeds 204 bytes packets into the SAA7146,
	 *  DMA3 is configured to strip the trailing 16 FEC bytes:
	 *      Pitch: 188, NumBytes3: 188, NumLines3: 1024
	 */

	switch(budget->card->type) {
	case BUDGET_FS_ACTIVY:
		saa7146_write(dev, DD1_INIT, 0x04000000);
		saa7146_write(dev, MC2, (MASK_09 | MASK_25));
		saa7146_write(dev, BRS_CTRL, 0x00000000);
		break;
	case BUDGET_PATCH:
		saa7146_write(dev, DD1_INIT, 0x00000200);
		saa7146_write(dev, MC2, (MASK_10 | MASK_26));
		saa7146_write(dev, BRS_CTRL, 0x60000000);
		break;
	case BUDGET_CIN1200C_MK3:
	case BUDGET_KNC1C_MK3:
	case BUDGET_KNC1C_TDA10024:
	case BUDGET_KNC1CP_MK3:
		/* MK3 DVB-C cards: route according to the selected video port. */
		if (budget->video_port == BUDGET_VIDEO_PORTA) {
			saa7146_write(dev, DD1_INIT, 0x06000200);
			saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
			saa7146_write(dev, BRS_CTRL, 0x00000000);
		} else {
			saa7146_write(dev, DD1_INIT, 0x00000600);
			saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
			saa7146_write(dev, BRS_CTRL, 0x60000000);
		}
		break;
	default:
		if (budget->video_port == BUDGET_VIDEO_PORTA) {
			saa7146_write(dev, DD1_INIT, 0x06000200);
			saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
			saa7146_write(dev, BRS_CTRL, 0x00000000);
		} else {
			saa7146_write(dev, DD1_INIT, 0x02000600);
			saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
			saa7146_write(dev, BRS_CTRL, 0x60000000);
		}
	}

	/* Latch the DD1/BRS programming above. */
	saa7146_write(dev, MC2, (MASK_08 | MASK_24));
	mdelay(10);

	saa7146_write(dev, BASE_ODD3, 0);
	if (budget->buffer_size > budget->buffer_height * budget->buffer_width) {
		// using odd/even buffers
		saa7146_write(dev, BASE_EVEN3, budget->buffer_height * budget->buffer_width);
	} else {
		// using a single buffer
		saa7146_write(dev, BASE_EVEN3, 0);
	}
	saa7146_write(dev, PROT_ADDR3, budget->buffer_size);
	saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90);

	saa7146_write(dev, PITCH3, budget->buffer_width);
	saa7146_write(dev, NUM_LINE_BYTE3, (budget->buffer_height << 16) | budget->buffer_width);

	saa7146_write(dev, MC2, (MASK_04 | MASK_20));

	SAA7146_ISR_CLEAR(budget->dev, MASK_10);	/* VPE */
	SAA7146_IER_ENABLE(budget->dev, MASK_10);	/* VPE */

	saa7146_write(dev, MC1, (MASK_04 | MASK_20));	/* DMA3 on */

	return 0;
}

/*
 * Wrapper installed over the frontend's original read_status op
 * (see ttpci_budget_init_hooks): tracks FE_HAS_LOCK transitions and
 * starts/stops TS capture accordingly.
 */
static int budget_read_fe_status(struct dvb_frontend *fe, fe_status_t *status)
{
	struct budget *budget = (struct budget *) fe->dvb->priv;
	int synced;
	int ret;

	if (budget->read_fe_status)
		ret = budget->read_fe_status(fe, status);
	else
		ret = -EINVAL;

	if (!ret) {
		synced = (*status & FE_HAS_LOCK);
		if (synced != budget->fe_synced) {
			budget->fe_synced = synced;
			spin_lock(&budget->feedlock);
			if (synced)
				start_ts_capture(budget);
			else
				stop_ts_capture(budget);
			spin_unlock(&budget->feedlock);
		}
	}
	return ret;
}

/*
 * Tasklet body (scheduled from ttpci_budget_irq10_handler): hand the
 * TS packets DMA'd since the last run to the software demux, tracking
 * the DMA write pointer in budget->ttbp and handling ring wraparound.
 */
static void vpeirq(unsigned long data)
{
	struct budget *budget = (struct budget *) data;
	u8 *mem = (u8 *) (budget->grabbing);
	u32 olddma = budget->ttbp;
	u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
	u32 count;

	/* Ensure streamed PCI data is synced to CPU */
	pci_dma_sync_sg_for_cpu(budget->dev->pci, budget->pt.slist, budget->pt.nents, PCI_DMA_FROMDEVICE);

	/* nearest lower position divisible by 188 */
	newdma -= newdma % 188;

	if (newdma >= budget->buffer_size)
		return;

	budget->ttbp = newdma;

	if (budget->feeding == 0 || newdma == olddma)
		return;

	if (newdma > olddma) {	/* no wraparound, dump olddma..newdma */
		count = newdma - olddma;
		dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
	} else {	/* wraparound, dump olddma..buflen and 0..newdma */
		count = budget->buffer_size - olddma;
		dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
		count += newdma;
		dvb_dmx_swfilter_packets(&budget->demux, mem, newdma / 188);
	}

	if (count > budget->buffer_warning_threshold)
		budget->buffer_warnings++;

	/* Rate-limited warning when a single pass consumed >80% of the ring. */
	if (budget->buffer_warnings && time_after(jiffies, budget->buffer_warning_time)) {
		printk("%s %s: used %d times >80%% of buffer (%u bytes now)\n",
			budget->dev->name, __func__, budget->buffer_warnings, count);
		budget->buffer_warning_time = jiffies + BUFFER_WARNING_WAIT;
		budget->buffer_warnings = 0;
	}
}

/*
 * Read 1..4 bytes from the DEBI bus. Returns the value read (masked to
 * `count` bytes), a negative error from the DEBI-done wait, or 0 for an
 * out-of-range count. With uselocks, debilock is held across the cycle.
 */
int ttpci_budget_debiread(struct budget *budget, u32 config, int addr, int count, int uselocks, int nobusyloop)
{
	struct saa7146_dev *saa = budget->dev;
	int result = 0;
	unsigned long flags = 0;

	if (count > 4 || count <= 0)
		return 0;

	if (uselocks)
		spin_lock_irqsave(&budget->debilock, flags);

	if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
		if (uselocks)
			spin_unlock_irqrestore(&budget->debilock, flags);
		return result;
	}

	/* 0x10000 selects a DEBI read cycle. */
	saa7146_write(saa, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff));
	saa7146_write(saa, DEBI_CONFIG, config);
	saa7146_write(saa, DEBI_PAGE, 0);
	saa7146_write(saa, MC2, (2 << 16) | 2);

	if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
		if (uselocks)
			spin_unlock_irqrestore(&budget->debilock, flags);
		return result;
	}

	result = saa7146_read(saa, DEBI_AD);
	/* Keep only the `count` low-order bytes. */
	result &= (0xffffffffUL >> ((4 - count) * 8));

	if (uselocks)
		spin_unlock_irqrestore(&budget->debilock, flags);

	return result;
}

/*
 * Write 1..4 bytes to the DEBI bus. Returns 0 on success, a negative
 * error from the DEBI-done wait, or 0 for an out-of-range count.
 */
int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr, int count, u32 value, int uselocks, int nobusyloop)
{
	struct saa7146_dev *saa = budget->dev;
	unsigned long flags = 0;
	int result;

	if (count > 4 || count <= 0)
		return 0;

	if (uselocks)
		spin_lock_irqsave(&budget->debilock, flags);

	if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
		if (uselocks)
			spin_unlock_irqrestore(&budget->debilock, flags);
		return result;
	}

	saa7146_write(saa, DEBI_COMMAND, (count << 17) | 0x00000 | (addr & 0xffff));
	saa7146_write(saa, DEBI_CONFIG, config);
	saa7146_write(saa, DEBI_PAGE, 0);
	saa7146_write(saa, DEBI_AD, value);
	saa7146_write(saa, MC2, (2 << 16) | 2);

	if ((result = saa7146_wait_for_debi_done(saa, nobusyloop)) < 0) {
		if (uselocks)
			spin_unlock_irqrestore(&budget->debilock, flags);
		return result;
	}

	if (uselocks)
		spin_unlock_irqrestore(&budget->debilock, flags);
	return 0;
}

/****************************************************************************
 * DVB API SECTION
 ****************************************************************************/

/* demux callback: the first active feed starts TS capture. */
static int budget_start_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct budget *budget = (struct budget *) demux->priv;
	int status = 0;

	dprintk(2, "budget: %p\n", budget);

	if (!demux->dmx.frontend)
		return -EINVAL;

	spin_lock(&budget->feedlock);
	feed->pusi_seen = 0; /* have a clean section start */
	if (budget->feeding++ == 0)
		status = start_ts_capture(budget);
	spin_unlock(&budget->feedlock);
	return status;
}

/* demux callback: removing the last feed stops TS capture. */
static int budget_stop_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct budget *budget = (struct budget *) demux->priv;
	int status = 0;

	dprintk(2, "budget: %p\n", budget);

	spin_lock(&budget->feedlock);
	if (--budget->feeding == 0)
		status = stop_ts_capture(budget);
	spin_unlock(&budget->feedlock);
	return status;
}

/* Register demux, dmxdev, both demux frontends and dvb_net with the DVB core. */
static int budget_register(struct budget *budget)
{
	struct dvb_demux *dvbdemux = &budget->demux;
	int ret;

	dprintk(2, "budget: %p\n", budget);

	dvbdemux->priv = (void *) budget;

	dvbdemux->filternum = 256;
	dvbdemux->feednum = 256;
	dvbdemux->start_feed = budget_start_feed;
	dvbdemux->stop_feed = budget_stop_feed;
	dvbdemux->write_to_decoder = NULL;

	dvbdemux->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING);

	dvb_dmx_init(&budget->demux);

	budget->dmxdev.filternum = 256;
	budget->dmxdev.demux = &dvbdemux->dmx;
	budget->dmxdev.capabilities = 0;

	dvb_dmxdev_init(&budget->dmxdev, &budget->dvb_adapter);

	budget->hw_frontend.source = DMX_FRONTEND_0;

	ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);

	if (ret < 0)
		return ret;

	budget->mem_frontend.source = DMX_MEMORY_FE;
	ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
	if (ret < 0)
		return ret;

	ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
	if (ret < 0)
		return ret;

	dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);

	return 0;
}

/* Undo budget_register() in reverse order. */
static void budget_unregister(struct budget *budget)
{
	struct dvb_demux *dvbdemux = &budget->demux;

	dprintk(2, "budget: %p\n", budget);

	dvb_net_release(&budget->dvb_net);

	dvbdemux->dmx.close(&dvbdemux->dmx);
	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &budget->hw_frontend);
	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &budget->mem_frontend);

	dvb_dmxdev_release(&budget->dmxdev);
	dvb_dmx_release(&budget->demux);
}

/*
 * Device bring-up: size and map the TS DMA ring for the card type,
 * register the DVB adapter, prepare I2C and the VPE tasklet, and
 * register the demux. On any failure, resources acquired so far are
 * released (goto-cleanup chain at the bottom).
 */
int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
		      struct saa7146_pci_extension_data *info,
		      struct module *owner, short *adapter_nums)
{
	int ret = 0;
	struct budget_info *bi = info->ext_priv;
	int max_bufsize;
	int height_mask;

	memset(budget, 0, sizeof(struct budget));

	dprintk(2, "dev: %p, budget: %p\n", dev, budget);

	budget->card = bi;
	budget->dev = (struct saa7146_dev *) dev;

	switch(budget->card->type) {
	case BUDGET_FS_ACTIVY:
		budget->buffer_width = TS_WIDTH_ACTIVY;
		max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY;
		height_mask = TS_HEIGHT_MASK_ACTIVY;
		break;

	case BUDGET_KNC1C:
	case BUDGET_KNC1CP:
	case BUDGET_CIN1200C:
	case BUDGET_KNC1C_MK3:
	case BUDGET_KNC1C_TDA10024:
	case BUDGET_KNC1CP_MK3:
	case BUDGET_CIN1200C_MK3:
		budget->buffer_width = TS_WIDTH_DVBC;
		max_bufsize = TS_MAX_BUFSIZE_K_DVBC;
		height_mask = TS_HEIGHT_MASK_DVBC;
		break;

	default:
		budget->buffer_width = TS_WIDTH;
		max_bufsize = TS_MAX_BUFSIZE_K;
		height_mask = TS_HEIGHT_MASK;
	}

	/* Clamp the module parameter into the card's supported range. */
	if (dma_buffer_size < TS_MIN_BUFSIZE_K)
		dma_buffer_size = TS_MIN_BUFSIZE_K;
	else if (dma_buffer_size > max_bufsize)
		dma_buffer_size = max_bufsize;

	/* Height register is 12 bits; larger requests use odd/even halves. */
	budget->buffer_height = dma_buffer_size * 1024 / budget->buffer_width;
	if (budget->buffer_height > 0xfff) {
		budget->buffer_height /= 2;
		budget->buffer_height &= height_mask;
		budget->buffer_size = 2 * budget->buffer_height * budget->buffer_width;
	} else {
		budget->buffer_height &= height_mask;
		budget->buffer_size = budget->buffer_height * budget->buffer_width;
	}
	budget->buffer_warning_threshold = budget->buffer_size * 80/100;
	budget->buffer_warnings = 0;
	budget->buffer_warning_time = jiffies;

	dprintk(2, "%s: buffer type = %s, width = %d, height = %d\n",
		budget->dev->name,
		budget->buffer_size > budget->buffer_width * budget->buffer_height ? "odd/even" : "single",
		budget->buffer_width, budget->buffer_height);
	printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);

	ret = dvb_register_adapter(&budget->dvb_adapter, budget->card->name,
				   owner, &budget->dev->pci->dev, adapter_nums);
	if (ret < 0)
		return ret;

	/* set dd1 stream a & b */
	saa7146_write(dev, DD1_STREAM_B, 0x00000000);
	saa7146_write(dev, MC2, (MASK_09 | MASK_25));
	saa7146_write(dev, MC2, (MASK_10 | MASK_26));
	saa7146_write(dev, DD1_INIT, 0x02000000);
	saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

	if (bi->type != BUDGET_FS_ACTIVY)
		budget->video_port = BUDGET_VIDEO_PORTB;
	else
		budget->video_port = BUDGET_VIDEO_PORTA;
	spin_lock_init(&budget->feedlock);
	spin_lock_init(&budget->debilock);

	/* the Siemens DVB needs this if you want to have the i2c chips
	   get recognized before the main driver is loaded */
	if (bi->type != BUDGET_FS_ACTIVY)
		saa7146_write(dev, GPIO_CTRL, 0x500000);	/* GPIO 3 = 1 */

	strlcpy(budget->i2c_adap.name, budget->card->name, sizeof(budget->i2c_adap.name));

	saa7146_i2c_adapter_prepare(dev, &budget->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120);
	strcpy(budget->i2c_adap.name, budget->card->name);

	if (i2c_add_adapter(&budget->i2c_adap) < 0) {
		ret = -ENOMEM;
		goto err_dvb_unregister;
	}

	ttpci_eeprom_parse_mac(&budget->i2c_adap, budget->dvb_adapter.proposed_mac);

	budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, budget->buffer_size, &budget->pt);
	if (NULL == budget->grabbing) {
		ret = -ENOMEM;
		goto err_del_i2c;
	}

	saa7146_write(dev, PCI_BT_V1, 0x001c0000);
	/* upload all */
	saa7146_write(dev, GPIO_CTRL, 0x000000);

	tasklet_init(&budget->vpe_tasklet, vpeirq, (unsigned long) budget);

	/* frontend power on */
	if (bi->type != BUDGET_FS_ACTIVY)
		saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);

	if ((ret = budget_register(budget)) == 0)
		return 0; /* Everything OK */

	/* An error occurred, cleanup resources */
	saa7146_vfree_destroy_pgtable(dev->pci, budget->grabbing, &budget->pt);

err_del_i2c:
	i2c_del_adapter(&budget->i2c_adap);

err_dvb_unregister:
	dvb_unregister_adapter(&budget->dvb_adapter);

	return ret;
}

/*
 * Interpose budget_read_fe_status over the attached frontend's
 * read_status op, saving the original for chaining. Idempotent.
 */
void ttpci_budget_init_hooks(struct budget *budget)
{
	if (budget->dvb_frontend && !budget->read_fe_status) {
		budget->read_fe_status = budget->dvb_frontend->ops.read_status;
		budget->dvb_frontend->ops.read_status = budget_read_fe_status;
	}
}

/* Tear down everything set up by ttpci_budget_init(). Always returns 0. */
int ttpci_budget_deinit(struct budget *budget)
{
	struct saa7146_dev *dev = budget->dev;

	dprintk(2, "budget: %p\n", budget);

	budget_unregister(budget);

	tasklet_kill(&budget->vpe_tasklet);

	saa7146_vfree_destroy_pgtable(dev->pci, budget->grabbing, &budget->pt);

	i2c_del_adapter(&budget->i2c_adap);

	dvb_unregister_adapter(&budget->dvb_adapter);

	return 0;
}

/* IRQ dispatch helper: defer TS processing to the VPE tasklet. */
void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr)
{
	struct budget *budget = (struct budget *) dev->ext_priv;

	dprintk(8, "dev: %p, budget: %p\n", dev, budget);

	if (*isr & MASK_10)
		tasklet_schedule(&budget->vpe_tasklet);
}

/* Switch the active video port, restarting capture if currently streaming. */
void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port)
{
	struct budget *budget = (struct budget *) dev->ext_priv;

	spin_lock(&budget->feedlock);
	budget->video_port = video_port;
	if (budget->feeding) {
		stop_ts_capture(budget);
		start_ts_capture(budget);
	}
	spin_unlock(&budget->feedlock);
}

EXPORT_SYMBOL_GPL(ttpci_budget_debiread);
EXPORT_SYMBOL_GPL(ttpci_budget_debiwrite);
EXPORT_SYMBOL_GPL(ttpci_budget_init);
EXPORT_SYMBOL_GPL(ttpci_budget_init_hooks);
EXPORT_SYMBOL_GPL(ttpci_budget_deinit);
EXPORT_SYMBOL_GPL(ttpci_budget_irq10_handler);
EXPORT_SYMBOL_GPL(ttpci_budget_set_video_port);
EXPORT_SYMBOL_GPL(budget_debug);

MODULE_LICENSE("GPL");
gpl-2.0
liquidware/android-kernel-omap3
security/integrity/ima/ima_init.c
9110
2699
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Reiner Sailer <sailer@watson.ibm.com> * Leendert van Doorn <leendert@watson.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_init.c * initialization and cleanup functions */ #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/err.h> #include "ima.h" /* name for boot aggregate entry */ static const char *boot_aggregate_name = "boot_aggregate"; int ima_used_chip; /* Add the boot aggregate to the IMA measurement list and extend * the PCR register. * * Calculate the boot aggregate, a SHA1 over tpm registers 0-7, * assuming a TPM chip exists, and zeroes if the TPM chip does not * exist. Add the boot aggregate measurement to the measurement * list and extend the PCR register. * * If a tpm chip does not exist, indicate the core root of trust is * not hardware based by invalidating the aggregate PCR value. * (The aggregate PCR value is invalidated by adding one value to * the measurement list and extending the aggregate PCR value with * a different value.) Violations add a zero entry to the measurement * list and extend the aggregate PCR value with ff...ff's. 
*/ static void __init ima_add_boot_aggregate(void) { struct ima_template_entry *entry; const char *op = "add_boot_aggregate"; const char *audit_cause = "ENOMEM"; int result = -ENOMEM; int violation = 1; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto err_out; memset(&entry->template, 0, sizeof(entry->template)); strncpy(entry->template.file_name, boot_aggregate_name, IMA_EVENT_NAME_LEN_MAX); if (ima_used_chip) { violation = 0; result = ima_calc_boot_aggregate(entry->template.digest); if (result < 0) { audit_cause = "hashing_error"; kfree(entry); goto err_out; } } result = ima_store_template(entry, violation, NULL); if (result < 0) kfree(entry); return; err_out: integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op, audit_cause, result, 0); } int __init ima_init(void) { u8 pcr_i[IMA_DIGEST_SIZE]; int rc; ima_used_chip = 0; rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_i); if (rc == 0) ima_used_chip = 1; if (!ima_used_chip) pr_info("IMA: No TPM chip found, activating TPM-bypass!\n"); ima_add_boot_aggregate(); /* boot aggregate must be first entry */ ima_init_policy(); return ima_fs_init(); } void __exit ima_cleanup(void) { ima_fs_cleanup(); }
gpl-2.0
Motorhead1991/android_kernel_samsung_amazing
arch/cris/arch-v32/mach-a3/arbiter.c
9878
17688
/*
 * Memory arbiter functions. Allocates bandwidth through the
 * arbiter and sets up arbiter breakpoints.
 *
 * The algorithm first assigns slots to the clients that has specified
 * bandwidth (e.g. ethernet) and then the remaining slots are divided
 * on all the active clients.
 *
 * Copyright (c) 2004-2007 Axis Communications AB.
 *
 * The artpec-3 has two arbiters. The memory hierarchy looks like this:
 *
 *
 *             CPU     DMAs
 *              |       |
 *              |       |
 *       --------------    ------------------
 *       | foo arbiter|----| Internal memory|
 *       --------------    ------------------
 *              |
 *       --------------
 *       |  L2 cache  |
 *       --------------
 *              |
 *   h264 etc   |
 *      |       |
 *      |       |
 *       --------------
 *       | bar arbiter|
 *       --------------
 *              |
 *          ---------
 *          | SDRAM |
 *          ---------
 *
 */

#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_foo_defs.h>
#include <hwregs/marb_bar_defs.h>
#include <arbiter.h>
#include <hwregs/intr_vect.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq_regs.h>

#define D(x)

/* One hardware breakpoint slot of an arbiter. */
struct crisv32_watch_entry {
	unsigned long instance;	/* register block base for this breakpoint */
	watch_callback *cb;	/* callback invoked from the arbiter IRQ */
	unsigned long start;	/* watched physical range [start, end) */
	unsigned long end;
	int used;		/* non-zero while armed */
};

#define NUMBER_OF_BP 4
#define SDRAM_BANDWIDTH 400000000
#define INTMEM_BANDWIDTH 400000000
#define NBR_OF_SLOTS 64
#define NBR_OF_REGIONS 2
#define NBR_OF_CLIENTS 15
#define ARBITERS 2
#define UNASSIGNED 100

struct arbiter {
	unsigned long instance;
	int nbr_regions;
	int nbr_clients;
	int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
	int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
};

static struct crisv32_watch_entry watches[ARBITERS][NUMBER_OF_BP] = {
	{
		{regi_marb_foo_bp0},
		{regi_marb_foo_bp1},
		{regi_marb_foo_bp2},
		{regi_marb_foo_bp3}
	},
	{
		{regi_marb_bar_bp0},
		{regi_marb_bar_bp1},
		{regi_marb_bar_bp2},
		{regi_marb_bar_bp3}
	}
};

struct arbiter arbiters[ARBITERS] = {
	{	/* L2 cache arbiter */
		.instance = regi_marb_foo,
		.nbr_regions = 2,
		.nbr_clients = 15
	},
	{	/* DDR2 arbiter */
		.instance = regi_marb_bar,
		.nbr_regions = 1,
		.nbr_clients = 9
	}
};

static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH};

DEFINE_SPINLOCK(arbiter_lock);

static irqreturn_t crisv32_foo_arbiter_irq(int irq, void *dev_id);
static irqreturn_t crisv32_bar_arbiter_irq(int irq, void *dev_id);

/*
 * "I'm the arbiter, I know the score.
 * From square one I'll be watching all 64."
 * (memory arbiter slots, that is)
 *
 * Or in other words:
 * Program the memory arbiter slots for "region" according to what's
 * in requested_slots[] and active_clients[], while minimizing
 * latency. A caller may pass a non-zero positive amount for
 * "unused_slots", which must then be the unallocated, remaining
 * number of slots, free to hand out to any client.
 */
static void crisv32_arbiter_config(int arbiter, int region, int unused_slots)
{
	int slot;
	int client;
	int interval = 0;
	/*
	 * This vector corresponds to the hardware arbiter slots (see
	 * the hardware documentation for semantics). We initialize
	 * each slot with a suitable sentinel value outside the valid
	 * range {0 .. NBR_OF_CLIENTS - 1} and replace them with
	 * client indexes. Then it's fed to the hardware.
	 */
	s8 val[NBR_OF_SLOTS];

	for (slot = 0; slot < NBR_OF_SLOTS; slot++)
		val[slot] = -1;

	for (client = 0; client < arbiters[arbiter].nbr_clients; client++) {
		int pos;
		/* Allocate the requested non-zero number of slots, but
		 * also give clients with zero-requests one slot each
		 * while stocks last. We do the latter here, in client
		 * order. This makes sure zero-request clients are the
		 * first to get to any spare slots, else those slots
		 * could, when bandwidth is allocated close to the limit,
		 * all be allocated to low-index non-zero-request clients
		 * in the default-fill loop below. Another positive but
		 * secondary effect is a somewhat better spread of the
		 * zero-bandwidth clients in the vector, avoiding some of
		 * the latency that could otherwise be caused by the
		 * partitioning of non-zero-bandwidth clients at low
		 * indexes and zero-bandwidth clients at high
		 * indexes. (Note that this spreading can only affect the
		 * unallocated bandwidth.) All the above only matters for
		 * memory-intensive situations, of course.
		 */
		if (!arbiters[arbiter].requested_slots[region][client]) {
			/*
			 * Skip inactive clients. Also skip zero-slot
			 * allocations in this pass when there are no known
			 * free slots.
			 */
			if (!arbiters[arbiter].active_clients[region][client] ||
			    unused_slots <= 0)
				continue;

			unused_slots--;

			/* Only allocate one slot for this client. */
			interval = NBR_OF_SLOTS;
		} else
			interval = NBR_OF_SLOTS /
				arbiters[arbiter].requested_slots[region][client];

		pos = 0;
		while (pos < NBR_OF_SLOTS) {
			if (val[pos] >= 0)
				pos++;
			else {
				val[pos] = client;
				pos += interval;
			}
		}
	}

	client = 0;
	for (slot = 0; slot < NBR_OF_SLOTS; slot++) {
		/*
		 * Allocate remaining slots in round-robin
		 * client-number order for active clients. For this
		 * pass, we ignore requested bandwidth and previous
		 * allocations.
		 */
		if (val[slot] < 0) {
			int first = client;
			while (!arbiters[arbiter].active_clients[region][client]) {
				client = (client + 1) %
					arbiters[arbiter].nbr_clients;
				if (client == first)
					break;
			}
			val[slot] = client;
			client = (client + 1) % arbiters[arbiter].nbr_clients;
		}
		if (arbiter == 0) {
			if (region == EXT_REGION)
				REG_WR_INT_VECT(marb_foo, regi_marb_foo,
					rw_l2_slots, slot, val[slot]);
			else if (region == INT_REGION)
				REG_WR_INT_VECT(marb_foo, regi_marb_foo,
					rw_intm_slots, slot, val[slot]);
		} else {
			REG_WR_INT_VECT(marb_bar, regi_marb_bar,
				rw_ddr2_slots, slot, val[slot]);
		}
	}
}

extern char _stext, _etext;

/*
 * One-time lazy initialization: mark the CPU caches active, program the
 * initial slot tables, claim the arbiter IRQs, arm the kernel-text write
 * watch and set default burst sizes. Safe to call repeatedly.
 */
static void crisv32_arbiter_init(void)
{
	static int initialized;

	if (initialized)
		return;

	initialized = 1;

	/*
	 * CPU caches are always set to active, but with zero
	 * bandwidth allocated. It should be ok to allocate zero
	 * bandwidth for the caches, because DMA for other channels
	 * will supposedly finish, once their programmed amount is
	 * done, and then the caches will get access according to the
	 * "fixed scheme" for unclaimed slots. Though, if for some
	 * use-case somewhere, there's a maximum CPU latency for
	 * e.g. some interrupt, we have to start allocating specific
	 * bandwidth for the CPU caches too.
	 */
	arbiters[0].active_clients[EXT_REGION][11] = 1;
	arbiters[0].active_clients[EXT_REGION][12] = 1;
	crisv32_arbiter_config(0, EXT_REGION, 0);
	crisv32_arbiter_config(0, INT_REGION, 0);
	crisv32_arbiter_config(1, EXT_REGION, 0);

	if (request_irq(MEMARB_FOO_INTR_VECT, crisv32_foo_arbiter_irq,
			IRQF_DISABLED, "arbiter", NULL))
		printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");

	if (request_irq(MEMARB_BAR_INTR_VECT, crisv32_bar_arbiter_irq,
			IRQF_DISABLED, "arbiter", NULL))
		printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");

#ifndef CONFIG_ETRAX_KGDB
	/* Global watch for writes to kernel text segment. */
	crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
			      MARB_CLIENTS(arbiter_all_clients,
					   arbiter_bar_all_clients),
			      arbiter_all_write, NULL);
#endif

	/* Set up max burst sizes by default */
	REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_wr_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_ccd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_wr_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vout_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_fifo_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_l2cache_burst, 3);
}

/*
 * Allocate "bandwidth" bytes/s for "client" in "region". Clients of the
 * bar arbiter are encoded in the high 16 bits of "client". Returns 0 on
 * success or -ENOMEM when not enough arbiter slots remain.
 */
int crisv32_arbiter_allocate_bandwidth(int client, int region,
				       unsigned long bandwidth)
{
	int i;
	int total_assigned = 0;
	int total_clients = 0;
	int req;
	int arbiter = 0;

	crisv32_arbiter_init();

	if (client & 0xffff0000) {
		arbiter = 1;
		client >>= 16;
	}

	for (i = 0; i < arbiters[arbiter].nbr_clients; i++) {
		total_assigned += arbiters[arbiter].requested_slots[region][i];
		total_clients += arbiters[arbiter].active_clients[region][i];
	}

	/* Avoid division by 0 for 0-bandwidth requests. */
	req = bandwidth == 0
		? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);

	/*
	 * We make sure that there are enough slots only for non-zero
	 * requests. Requesting 0 bandwidth *may* allocate slots,
	 * though if all bandwidth is allocated, such a client won't
	 * get any and will have to rely on getting memory access
	 * according to the fixed scheme that's the default when one
	 * of the slot-allocated clients doesn't claim their slot.
	 */
	if (total_assigned + req > NBR_OF_SLOTS)
		return -ENOMEM;

	arbiters[arbiter].active_clients[region][client] = 1;
	arbiters[arbiter].requested_slots[region][client] = req;
	crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);

	/* Propagate allocation from foo to bar */
	if (arbiter == 0)
		crisv32_arbiter_allocate_bandwidth(8 << 16, EXT_REGION,
						   bandwidth);

	return 0;
}

/*
 * Main entry for bandwidth deallocation.
 *
 * Strictly speaking, for a somewhat constant set of clients where
 * each client gets a constant bandwidth and is just enabled or
 * disabled (somewhat dynamically), no action is necessary here to
 * avoid starvation for non-zero-allocation clients, as the allocated
 * slots will just be unused. However, handing out those unused slots
 * to active clients avoids needless latency if the "fixed scheme"
 * would give unclaimed slots to an eager low-index client.
 */
void crisv32_arbiter_deallocate_bandwidth(int client, int region)
{
	int i;
	int total_assigned = 0;
	int arbiter = 0;

	if (client & 0xffff0000) {
		arbiter = 1;
		/*
		 * FIX: bar-arbiter clients are encoded in the high 16 bits
		 * (see crisv32_arbiter_allocate_bandwidth, which shifts
		 * before indexing). The original code used the unshifted
		 * value as an array index, writing far outside
		 * requested_slots[]/active_clients[].
		 */
		client >>= 16;
	}

	arbiters[arbiter].requested_slots[region][client] = 0;
	arbiters[arbiter].active_clients[region][client] = 0;

	for (i = 0; i < arbiters[arbiter].nbr_clients; i++)
		total_assigned += arbiters[arbiter].requested_slots[region][i];

	crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
}

/*
 * Arm a hardware watch (breakpoint) on accesses of type "accesses" by
 * "clients" to the physical range [start, start + size). Returns a
 * non-zero cookie for crisv32_arbiter_unwatch(), -EFAULT for a bad
 * address, or -ENOMEM when no breakpoint slot is free.
 */
int crisv32_arbiter_watch(unsigned long start, unsigned long size,
			  unsigned long clients, unsigned long accesses,
			  watch_callback *cb)
{
	int i;
	int arbiter;
	/*
	 * FIX: must be zero-initialized; the original declared "int used[2];"
	 * and only conditionally set the entries before reading them below
	 * (use of an uninitialized automatic variable is undefined behavior).
	 */
	int used[2] = { 0, 0 };
	int ret = 0;

	crisv32_arbiter_init();

	if (start > 0x80000000) {
		printk(KERN_ERR "Arbiter: %lX doesn't look like a "
			"physical address", start);
		return -EFAULT;
	}

	spin_lock(&arbiter_lock);

	/* Low halfword selects foo-arbiter clients, high halfword bar. */
	if (clients & 0xffff)
		used[0] = 1;
	if (clients & 0xffff0000)
		used[1] = 1;

	for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
		if (!used[arbiter])
			continue;

		for (i = 0; i < NUMBER_OF_BP; i++) {
			if (!watches[arbiter][i].used) {
				unsigned intr_mask;
				if (arbiter)
					intr_mask = REG_RD_INT(marb_bar,
						regi_marb_bar, rw_intr_mask);
				else
					intr_mask = REG_RD_INT(marb_foo,
						regi_marb_foo, rw_intr_mask);

				watches[arbiter][i].used = 1;
				watches[arbiter][i].start = start;
				watches[arbiter][i].end = start + size;
				watches[arbiter][i].cb = cb;

				/* Cookie: breakpoint index + 1, per arbiter. */
				ret |= (i + 1) << (arbiter + 8);
				if (arbiter) {
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_first_addr,
						watches[arbiter][i].start);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_last_addr,
						watches[arbiter][i].end);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_op, accesses);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_clients,
						clients & 0xffff);
				} else {
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_first_addr,
						watches[arbiter][i].start);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_last_addr,
						watches[arbiter][i].end);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_op, accesses);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_clients, clients >> 16);
				}

				if (i == 0)
					intr_mask |= 1;
				else if (i == 1)
					intr_mask |= 2;
				else if (i == 2)
					intr_mask |= 4;
				else if (i == 3)
					intr_mask |= 8;

				if (arbiter)
					REG_WR_INT(marb_bar, regi_marb_bar,
						rw_intr_mask, intr_mask);
				else
					REG_WR_INT(marb_foo, regi_marb_foo,
						rw_intr_mask, intr_mask);

				/*
				 * FIX: the original released arbiter_lock here
				 * and again after the loops, unlocking twice
				 * (and leaving the second arbiter's setup to
				 * run unlocked). Keep the lock until the
				 * single unlock below.
				 */
				break;
			}
		}
	}
	spin_unlock(&arbiter_lock);
	if (ret)
		return ret;
	else
		return -ENOMEM;
}

/*
 * Disarm the watch(es) identified by a cookie from
 * crisv32_arbiter_watch(). Returns 0 on success, -EINVAL for a bad id.
 */
int crisv32_arbiter_unwatch(int id)
{
	int arbiter;
	int intr_mask;

	crisv32_arbiter_init();

	spin_lock(&arbiter_lock);

	for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
		int id2;

		if (arbiter)
			intr_mask = REG_RD_INT(marb_bar, regi_marb_bar,
				rw_intr_mask);
		else
			intr_mask = REG_RD_INT(marb_foo, regi_marb_foo,
				rw_intr_mask);

		id2 = (id & (0xff << (arbiter + 8))) >> (arbiter + 8);
		if (id2 == 0)
			continue;
		id2--;
		if ((id2 >= NUMBER_OF_BP) || (!watches[arbiter][id2].used)) {
			spin_unlock(&arbiter_lock);
			return -EINVAL;
		}

		/*
		 * FIX: preserve the register-block base across the reset;
		 * the original memset also wiped .instance (set up in the
		 * static initializer), so re-arming this breakpoint later
		 * would program register address 0.
		 */
		{
			unsigned long instance =
				watches[arbiter][id2].instance;

			memset(&watches[arbiter][id2], 0,
			       sizeof(struct crisv32_watch_entry));
			watches[arbiter][id2].instance = instance;
		}

		if (id2 == 0)
			intr_mask &= ~1;
		else if (id2 == 1)
			intr_mask &= ~2;
		else if (id2 == 2)
			intr_mask &= ~4;
		else if (id2 == 3)
			intr_mask &= ~8;

		if (arbiter)
			REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask,
				intr_mask);
		else
			REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask,
				intr_mask);
	}

	spin_unlock(&arbiter_lock);
	return 0;
}

extern void show_registers(struct pt_regs *regs);

/* IRQ handler for the foo (L2 cache) arbiter breakpoints. */
static irqreturn_t crisv32_foo_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_foo_r_masked_intr masked_intr =
		REG_RD(marb_foo, regi_marb_foo, r_masked_intr);
	reg_marb_foo_bp_r_brk_clients r_clients;
	reg_marb_foo_bp_r_brk_addr r_addr;
	reg_marb_foo_bp_r_brk_op r_op;
	reg_marb_foo_bp_r_brk_first_client r_first;
	reg_marb_foo_bp_r_brk_size r_size;
	reg_marb_foo_bp_rw_ack ack = {0};
	reg_marb_foo_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;
	unsigned arbiter = (unsigned)dev_id;

	masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr);

	if (masked_intr.bp0)
		watch = &watches[arbiter][0];
	else if (masked_intr.bp1)
		watch = &watches[arbiter][1];
	else if (masked_intr.bp2)
		watch = &watches[arbiter][2];
	else if (masked_intr.bp3)
		watch = &watches[arbiter][3];
	else
		return IRQ_NONE;

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_foo_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_foo_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_foo_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_foo_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_foo_bp, watch->instance, r_brk_size);

	printk(KERN_DEBUG "Arbiter IRQ\n");
	printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_clients, r_clients),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_addr, r_addr),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_op, r_op),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_first_client, r_first),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_size, r_size));

	REG_WR(marb_foo_bp, watch->instance, rw_ack, ack);
	REG_WR(marb_foo, regi_marb_foo, rw_ack_intr, ack_intr);

	printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs());

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}

/* IRQ handler for the bar (DDR2) arbiter breakpoints. */
static irqreturn_t crisv32_bar_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_bar_r_masked_intr masked_intr =
		REG_RD(marb_bar, regi_marb_bar, r_masked_intr);
	reg_marb_bar_bp_r_brk_clients r_clients;
	reg_marb_bar_bp_r_brk_addr r_addr;
	reg_marb_bar_bp_r_brk_op r_op;
	reg_marb_bar_bp_r_brk_first_client r_first;
	reg_marb_bar_bp_r_brk_size r_size;
	reg_marb_bar_bp_rw_ack ack = {0};
	reg_marb_bar_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;
	unsigned arbiter = (unsigned)dev_id;

	masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr);

	if (masked_intr.bp0)
		watch = &watches[arbiter][0];
	else if (masked_intr.bp1)
		watch = &watches[arbiter][1];
	else if (masked_intr.bp2)
		watch = &watches[arbiter][2];
	else if (masked_intr.bp3)
		watch = &watches[arbiter][3];
	else
		return IRQ_NONE;

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_bar_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_bar_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_bar_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_bar_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_bar_bp, watch->instance, r_brk_size);

	printk(KERN_DEBUG "Arbiter IRQ\n");
	printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_clients, r_clients),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_addr, r_addr),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_op, r_op),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_first_client, r_first),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_size, r_size));

	REG_WR(marb_bar_bp, watch->instance, rw_ack, ack);
	REG_WR(marb_bar, regi_marb_bar, rw_ack_intr, ack_intr);

	printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs()->erp);

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}
gpl-2.0
segment-routing/openwrt
net/ceph/auth_x.c
151
19211
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include <linux/ceph/libceph.h> #include <linux/ceph/messenger.h> #include "crypto.h" #include "auth_x.h" #include "auth_x_protocol.h" static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); static int ceph_x_is_authenticated(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_is_authenticated want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return (ac->want_keys & xi->have_keys) == ac->want_keys; } static int ceph_x_should_authenticate(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_should_authenticate want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return need != 0; } static int ceph_x_encrypt_buflen(int ilen) { return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + sizeof(u32); } static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *ibuf, int ilen, void *obuf, size_t olen) { struct ceph_x_encrypt_header head = { .struct_v = 1, .magic = cpu_to_le64(CEPHX_ENC_MAGIC) }; size_t len = olen - sizeof(u32); int ret; ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, &head, sizeof(head), ibuf, ilen); if (ret) return ret; ceph_encode_32(&obuf, len); return len + sizeof(u32); } static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void **obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); int len, ret; len = ceph_decode_32(p); if (*p + len > end) return -EINVAL; dout("ceph_x_decrypt len %d\n", len); if (*obuf == NULL) { *obuf = kmalloc(len, GFP_NOFS); if (!*obuf) return -ENOMEM; olen = len; } ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v 
!= 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) return -EPERM; *p += len; return olen; } /* * get existing (or insert new) ticket handler */ static struct ceph_x_ticket_handler * get_ticket_handler(struct ceph_auth_client *ac, int service) { struct ceph_x_ticket_handler *th; struct ceph_x_info *xi = ac->private; struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node; while (*p) { parent = *p; th = rb_entry(parent, struct ceph_x_ticket_handler, node); if (service < th->service) p = &(*p)->rb_left; else if (service > th->service) p = &(*p)->rb_right; else return th; } /* add it */ th = kzalloc(sizeof(*th), GFP_NOFS); if (!th) return ERR_PTR(-ENOMEM); th->service = service; rb_link_node(&th->node, parent, p); rb_insert_color(&th->node, &xi->ticket_handlers); return th; } static void remove_ticket_handler(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th) { struct ceph_x_info *xi = ac->private; dout("remove_ticket_handler %p %d\n", th, th->service); rb_erase(&th->node, &xi->ticket_handlers); ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); kfree(th); } static int process_one_ticket(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void **p, void *end) { struct ceph_x_info *xi = ac->private; int type; u8 tkt_struct_v, blob_struct_v; struct ceph_x_ticket_handler *th; void *dbuf = NULL; void *dp, *dend; int dlen; char is_enc; struct timespec validity; struct ceph_crypto_key old_key; void *ticket_buf = NULL; void *tp, *tpend; void **ptp; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct ceph_buffer *new_ticket_blob; unsigned long new_expires, new_renew_after; u64 new_secret_id; int ret; ceph_decode_need(p, end, sizeof(u32) + 1, bad); type = ceph_decode_32(p); dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); tkt_struct_v = ceph_decode_8(p); if (tkt_struct_v != 1) goto bad; th = get_ticket_handler(ac, type); if (IS_ERR(th)) { ret = 
PTR_ERR(th); goto out; } /* blob for me */ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0); if (dlen <= 0) { ret = dlen; goto out; } dout(" decrypted %d bytes\n", dlen); dp = dbuf; dend = dp + dlen; tkt_struct_v = ceph_decode_8(&dp); if (tkt_struct_v != 1) goto bad; memcpy(&old_key, &th->session_key, sizeof(old_key)); ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); if (ret) goto out; ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); ceph_decode_timespec(&validity, &new_validity); new_expires = get_seconds() + validity.tv_sec; new_renew_after = new_expires - (validity.tv_sec / 4); dout(" expires=%lu renew_after=%lu\n", new_expires, new_renew_after); /* ticket blob for service */ ceph_decode_8_safe(p, end, is_enc, bad); if (is_enc) { /* encrypted */ dout(" encrypted ticket\n"); dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0); if (dlen < 0) { ret = dlen; goto out; } tp = ticket_buf; ptp = &tp; tpend = *ptp + dlen; } else { /* unencrypted */ ptp = p; tpend = end; } ceph_decode_32_safe(ptp, tpend, dlen, bad); dout(" ticket blob is %d bytes\n", dlen); ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); blob_struct_v = ceph_decode_8(ptp); new_secret_id = ceph_decode_64(ptp); ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); if (ret) goto out; /* all is well, update our ticket */ ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); th->session_key = new_session_key; th->ticket_blob = new_ticket_blob; th->validity = new_validity; th->secret_id = new_secret_id; th->expires = new_expires; th->renew_after = new_renew_after; dout(" got ticket service %d (%s) secret_id %lld len %d\n", type, ceph_entity_type_name(type), th->secret_id, (int)th->ticket_blob->vec.iov_len); xi->have_keys |= th->service; out: kfree(ticket_buf); kfree(dbuf); return ret; bad: ret = -EINVAL; goto out; } static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void *buf, void *end) { 
void *p = buf; u8 reply_struct_v; u32 num; int ret; ceph_decode_8_safe(&p, end, reply_struct_v, bad); if (reply_struct_v != 1) return -EINVAL; ceph_decode_32_safe(&p, end, num, bad); dout("%d tickets\n", num); while (num--) { ret = process_one_ticket(ac, secret, &p, end); if (ret) return ret; } return 0; bad: return -EINVAL; } static void ceph_x_authorizer_cleanup(struct ceph_x_authorizer *au) { ceph_crypto_key_destroy(&au->session_key); if (au->buf) { ceph_buffer_put(au->buf); au->buf = NULL; } } static int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au) { int maxlen; struct ceph_x_authorize_a *msg_a; struct ceph_x_authorize_b msg_b; void *p, *end; int ret; int ticket_blob_len = (th->ticket_blob ? th->ticket_blob->vec.iov_len : 0); dout("build_authorizer for %s %p\n", ceph_entity_type_name(th->service), au); ceph_crypto_key_destroy(&au->session_key); ret = ceph_crypto_key_clone(&au->session_key, &th->session_key); if (ret) goto out_au; maxlen = sizeof(*msg_a) + sizeof(msg_b) + ceph_x_encrypt_buflen(ticket_blob_len); dout(" need len %d\n", maxlen); if (au->buf && au->buf->alloc_len < maxlen) { ceph_buffer_put(au->buf); au->buf = NULL; } if (!au->buf) { au->buf = ceph_buffer_new(maxlen, GFP_NOFS); if (!au->buf) { ret = -ENOMEM; goto out_au; } } au->service = th->service; au->secret_id = th->secret_id; msg_a = au->buf->vec.iov_base; msg_a->struct_v = 1; msg_a->global_id = cpu_to_le64(ac->global_id); msg_a->service_id = cpu_to_le32(th->service); msg_a->ticket_blob.struct_v = 1; msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id); msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len); if (ticket_blob_len) { memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base, th->ticket_blob->vec.iov_len); } dout(" th %p secret_id %lld %lld\n", th, th->secret_id, le64_to_cpu(msg_a->ticket_blob.secret_id)); p = msg_a + 1; p += ticket_blob_len; end = au->buf->vec.iov_base + au->buf->vec.iov_len; 
get_random_bytes(&au->nonce, sizeof(au->nonce)); msg_b.struct_v = 1; msg_b.nonce = cpu_to_le64(au->nonce); ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b), p, end - p); if (ret < 0) goto out_au; p += ret; au->buf->vec.iov_len = p - au->buf->vec.iov_base; dout(" built authorizer nonce %llx len %d\n", au->nonce, (int)au->buf->vec.iov_len); BUG_ON(au->buf->vec.iov_len > maxlen); return 0; out_au: ceph_x_authorizer_cleanup(au); return ret; } static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, void **p, void *end) { ceph_decode_need(p, end, 1 + sizeof(u64), bad); ceph_encode_8(p, 1); ceph_encode_64(p, th->secret_id); if (th->ticket_blob) { const char *buf = th->ticket_blob->vec.iov_base; u32 len = th->ticket_blob->vec.iov_len; ceph_encode_32_safe(p, end, len, bad); ceph_encode_copy_safe(p, end, buf, len, bad); } else { ceph_encode_32_safe(p, end, 0, bad); } return 0; bad: return -ERANGE; } static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed) { int want = ac->want_keys; struct ceph_x_info *xi = ac->private; int service; *pneed = ac->want_keys & ~(xi->have_keys); for (service = 1; service <= want; service <<= 1) { struct ceph_x_ticket_handler *th; if (!(ac->want_keys & service)) continue; if (*pneed & service) continue; th = get_ticket_handler(ac, service); if (IS_ERR(th)) { *pneed |= service; continue; } if (get_seconds() >= th->renew_after) *pneed |= service; if (get_seconds() >= th->expires) xi->have_keys &= ~service; } } static int ceph_x_build_request(struct ceph_auth_client *ac, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int need; struct ceph_x_request_header *head = buf; int ret; struct ceph_x_ticket_handler *th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ceph_x_validate_tickets(ac, &need); dout("build_request want %x have %x need %x\n", ac->want_keys, xi->have_keys, need); if (need & CEPH_ENTITY_TYPE_AUTH) { struct ceph_x_authenticate *auth = (void 
*)(head + 1); void *p = auth + 1; struct ceph_x_challenge_blob tmp; char tmp_enc[40]; u64 *u; if (p > end) return -ERANGE; dout(" get_auth_session_key\n"); head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); /* encrypt and hash */ get_random_bytes(&auth->client_challenge, sizeof(u64)); tmp.client_challenge = auth->client_challenge; tmp.server_challenge = cpu_to_le64(xi->server_challenge); ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), tmp_enc, sizeof(tmp_enc)); if (ret < 0) return ret; auth->struct_v = 1; auth->key = 0; for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) auth->key ^= *(__le64 *)u; dout(" server_challenge %llx client_challenge %llx key %llx\n", xi->server_challenge, le64_to_cpu(auth->client_challenge), le64_to_cpu(auth->key)); /* now encode the old ticket if exists */ ret = ceph_x_encode_ticket(th, &p, end); if (ret < 0) return ret; return p - buf; } if (need) { void *p = head + 1; struct ceph_x_service_ticket_request *req; if (p > end) return -ERANGE; head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer); if (ret) return ret; ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base, xi->auth_authorizer.buf->vec.iov_len); req = p; req->keys = cpu_to_le32(need); p += sizeof(*req); return p - buf; } return 0; } static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_x_info *xi = ac->private; struct ceph_x_reply_header *head = buf; struct ceph_x_ticket_handler *th; int len = end - buf; int op; int ret; if (result) return result; /* XXX hmm? 
*/ if (xi->starting) { /* it's a hello */ struct ceph_x_server_challenge *sc = buf; if (len != sizeof(*sc)) return -EINVAL; xi->server_challenge = le64_to_cpu(sc->server_challenge); dout("handle_reply got server challenge %llx\n", xi->server_challenge); xi->starting = false; xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH; return -EAGAIN; } op = le16_to_cpu(head->op); result = le32_to_cpu(head->result); dout("handle_reply op %d result %d\n", op, result); switch (op) { case CEPHX_GET_AUTH_SESSION_KEY: /* verify auth key */ ret = ceph_x_proc_ticket_reply(ac, &xi->secret, buf + sizeof(*head), end); break; case CEPHX_GET_PRINCIPAL_SESSION_KEY: th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_proc_ticket_reply(ac, &th->session_key, buf + sizeof(*head), end); break; default: return -EINVAL; } if (ret) return ret; if (ac->want_keys == xi->have_keys) return 0; return -EAGAIN; } static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; int ret; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = kzalloc(sizeof(*au), GFP_NOFS); if (!au) return -ENOMEM; ret = ceph_x_build_authorizer(ac, th, au); if (ret) { kfree(au); return ret; } auth->authorizer = (struct ceph_authorizer *) au; auth->authorizer_buf = au->buf->vec.iov_base; auth->authorizer_buf_len = au->buf->vec.iov_len; auth->authorizer_reply_buf = au->reply_buf; auth->authorizer_reply_buf_len = sizeof (au->reply_buf); auth->sign_message = ac->ops->sign_message; auth->check_message_signature = ac->ops->check_message_signature; return 0; } static int ceph_x_update_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = (struct ceph_x_authorizer 
*)auth->authorizer; if (au->secret_id < th->secret_id) { dout("ceph_x_update_authorizer service %u secret %llu < %llu\n", au->service, au->secret_id, th->secret_id); return ceph_x_build_authorizer(ac, th, au); } return 0; } static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { struct ceph_x_authorizer *au = (void *)a; int ret = 0; struct ceph_x_authorize_reply reply; void *preply = &reply; void *p = au->reply_buf; void *end = p + sizeof(au->reply_buf); ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) return -EPERM; if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ret = -EPERM; else ret = 0; dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); return ret; } static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { struct ceph_x_authorizer *au = (void *)a; ceph_x_authorizer_cleanup(au); kfree(au); } static void ceph_x_reset(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; dout("reset\n"); xi->starting = true; xi->server_challenge = 0; } static void ceph_x_destroy(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; struct rb_node *p; dout("ceph_x_destroy %p\n", ac); ceph_crypto_key_destroy(&xi->secret); while ((p = rb_first(&xi->ticket_handlers)) != NULL) { struct ceph_x_ticket_handler *th = rb_entry(p, struct ceph_x_ticket_handler, node); remove_ticket_handler(ac, th); } ceph_x_authorizer_cleanup(&xi->auth_authorizer); kfree(ac->private); ac->private = NULL; } static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (!IS_ERR(th)) memset(&th->validity, 0, sizeof(th->validity)); } static int calcu_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg, __le64 *sig) { int ret; char 
tmp_enc[40]; __le32 tmp[5] = { cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, msg->footer.middle_crc, msg->footer.data_crc, }; ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), tmp_enc, sizeof(tmp_enc)); if (ret < 0) return ret; *sig = *(__le64*)(tmp_enc + 4); return 0; } static int ceph_x_sign_message(struct ceph_auth_handshake *auth, struct ceph_msg *msg) { int ret; if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN)) return 0; ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer, msg, &msg->footer.sig); if (ret < 0) return ret; msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED; return 0; } static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth, struct ceph_msg *msg) { __le64 sig_check; int ret; if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN)) return 0; ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer, msg, &sig_check); if (ret < 0) return ret; if (sig_check == msg->footer.sig) return 0; if (msg->footer.flags & CEPH_MSG_FOOTER_SIGNED) dout("ceph_x_check_message_signature %p has signature %llx " "expect %llx\n", msg, msg->footer.sig, sig_check); else dout("ceph_x_check_message_signature %p sender did not set " "CEPH_MSG_FOOTER_SIGNED\n", msg); return -EBADMSG; } static const struct ceph_auth_client_ops ceph_x_ops = { .name = "x", .is_authenticated = ceph_x_is_authenticated, .should_authenticate = ceph_x_should_authenticate, .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, .update_authorizer = ceph_x_update_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, .reset = ceph_x_reset, .destroy = ceph_x_destroy, .sign_message = ceph_x_sign_message, .check_message_signature = ceph_x_check_message_signature, }; int ceph_x_init(struct ceph_auth_client *ac) { struct ceph_x_info *xi; int ret; dout("ceph_x_init 
%p\n", ac); ret = -ENOMEM; xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) goto out; ret = -EINVAL; if (!ac->key) { pr_err("no secret set (for auth_x protocol)\n"); goto out_nomem; } ret = ceph_crypto_key_clone(&xi->secret, ac->key); if (ret < 0) { pr_err("cannot clone key: %d\n", ret); goto out_nomem; } xi->starting = true; xi->ticket_handlers = RB_ROOT; ac->protocol = CEPH_AUTH_CEPHX; ac->private = xi; ac->ops = &ceph_x_ops; return 0; out_nomem: kfree(xi); out: return ret; }
gpl-2.0
leksmax/840v4_vocore
package/libs/libnl-tiny/src/genl_ctrl.c
663
6939
/* * lib/genl/ctrl.c Generic Netlink Controller * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation version 2.1 * of the License. * * Copyright (c) 2003-2008 Thomas Graf <tgraf@suug.ch> */ /** * @ingroup genl_mngt * @defgroup ctrl Controller * @brief * * @{ */ #include <netlink-generic.h> #include <netlink/netlink.h> #include <netlink/genl/genl.h> #include <netlink/genl/family.h> #include <netlink/genl/mngt.h> #include <netlink/genl/ctrl.h> #include <netlink/utils.h> /** @cond SKIP */ #define CTRL_VERSION 0x0001 static struct nl_cache_ops genl_ctrl_ops; /** @endcond */ static int ctrl_request_update(struct nl_cache *c, struct nl_sock *h) { return genl_send_simple(h, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, CTRL_VERSION, NLM_F_DUMP); } static struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = { [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 }, [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_STRING, .maxlen = GENL_NAMSIZ }, [CTRL_ATTR_VERSION] = { .type = NLA_U32 }, [CTRL_ATTR_HDRSIZE] = { .type = NLA_U32 }, [CTRL_ATTR_MAXATTR] = { .type = NLA_U32 }, [CTRL_ATTR_OPS] = { .type = NLA_NESTED }, }; static struct nla_policy family_op_policy[CTRL_ATTR_OP_MAX+1] = { [CTRL_ATTR_OP_ID] = { .type = NLA_U32 }, [CTRL_ATTR_OP_FLAGS] = { .type = NLA_U32 }, }; static int ctrl_msg_parser(struct nl_cache_ops *ops, struct genl_cmd *cmd, struct genl_info *info, void *arg) { struct genl_family *family; struct nl_parser_param *pp = arg; int err; family = genl_family_alloc(); if (family == NULL) { err = -NLE_NOMEM; goto errout; } if (info->attrs[CTRL_ATTR_FAMILY_NAME] == NULL) { err = -NLE_MISSING_ATTR; goto errout; } if (info->attrs[CTRL_ATTR_FAMILY_ID] == NULL) { err = -NLE_MISSING_ATTR; goto errout; } family->ce_msgtype = info->nlh->nlmsg_type; genl_family_set_id(family, nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID])); genl_family_set_name(family, 
nla_get_string(info->attrs[CTRL_ATTR_FAMILY_NAME])); if (info->attrs[CTRL_ATTR_VERSION]) { uint32_t version = nla_get_u32(info->attrs[CTRL_ATTR_VERSION]); genl_family_set_version(family, version); } if (info->attrs[CTRL_ATTR_HDRSIZE]) { uint32_t hdrsize = nla_get_u32(info->attrs[CTRL_ATTR_HDRSIZE]); genl_family_set_hdrsize(family, hdrsize); } if (info->attrs[CTRL_ATTR_MAXATTR]) { uint32_t maxattr = nla_get_u32(info->attrs[CTRL_ATTR_MAXATTR]); genl_family_set_maxattr(family, maxattr); } if (info->attrs[CTRL_ATTR_OPS]) { struct nlattr *nla, *nla_ops; int remaining; nla_ops = info->attrs[CTRL_ATTR_OPS]; nla_for_each_nested(nla, nla_ops, remaining) { struct nlattr *tb[CTRL_ATTR_OP_MAX+1]; int flags = 0, id; err = nla_parse_nested(tb, CTRL_ATTR_OP_MAX, nla, family_op_policy); if (err < 0) goto errout; if (tb[CTRL_ATTR_OP_ID] == NULL) { err = -NLE_MISSING_ATTR; goto errout; } id = nla_get_u32(tb[CTRL_ATTR_OP_ID]); if (tb[CTRL_ATTR_OP_FLAGS]) flags = nla_get_u32(tb[CTRL_ATTR_OP_FLAGS]); err = genl_family_add_op(family, id, flags); if (err < 0) goto errout; } } err = pp->pp_cb((struct nl_object *) family, pp); errout: genl_family_put(family); return err; } /** * @name Cache Management * @{ */ int genl_ctrl_alloc_cache(struct nl_sock *sock, struct nl_cache **result) { return nl_cache_alloc_and_fill(&genl_ctrl_ops, sock, result); } /** * Look up generic netlink family by id in the provided cache. * @arg cache Generic netlink family cache. * @arg id Family identifier. * * Searches through the cache looking for a registered family * matching the specified identifier. The caller will own a * reference on the returned object which needs to be given * back after usage using genl_family_put(). * * @return Generic netlink family object or NULL if no match was found. 
*/ struct genl_family *genl_ctrl_search(struct nl_cache *cache, int id) { struct genl_family *fam; if (cache->c_ops != &genl_ctrl_ops) BUG(); nl_list_for_each_entry(fam, &cache->c_items, ce_list) { if (fam->gf_id == id) { nl_object_get((struct nl_object *) fam); return fam; } } return NULL; } /** * @name Resolver * @{ */ /** * Look up generic netlink family by family name in the provided cache. * @arg cache Generic netlink family cache. * @arg name Family name. * * Searches through the cache looking for a registered family * matching the specified name. The caller will own a reference * on the returned object which needs to be given back after * usage using genl_family_put(). * * @return Generic netlink family object or NULL if no match was found. */ struct genl_family *genl_ctrl_search_by_name(struct nl_cache *cache, const char *name) { struct genl_family *fam; if (cache->c_ops != &genl_ctrl_ops) BUG(); nl_list_for_each_entry(fam, &cache->c_items, ce_list) { if (!strcmp(name, fam->gf_name)) { nl_object_get((struct nl_object *) fam); return fam; } } return NULL; } /** @} */ /** * Resolve generic netlink family name to its identifier * @arg sk Netlink socket. * @arg name Name of generic netlink family * * Resolves the generic netlink family name to its identifer and returns * it. * * @return A positive identifier or a negative error code. 
*/ int genl_ctrl_resolve(struct nl_sock *sk, const char *name) { struct nl_cache *cache; struct genl_family *family; int err; if ((err = genl_ctrl_alloc_cache(sk, &cache)) < 0) return err; family = genl_ctrl_search_by_name(cache, name); if (family == NULL) { err = -NLE_OBJ_NOTFOUND; goto errout; } err = genl_family_get_id(family); genl_family_put(family); errout: nl_cache_free(cache); return err; } /** @} */ static struct genl_cmd genl_cmds[] = { { .c_id = CTRL_CMD_NEWFAMILY, .c_name = "NEWFAMILY" , .c_maxattr = CTRL_ATTR_MAX, .c_attr_policy = ctrl_policy, .c_msg_parser = ctrl_msg_parser, }, { .c_id = CTRL_CMD_DELFAMILY, .c_name = "DELFAMILY" , }, { .c_id = CTRL_CMD_GETFAMILY, .c_name = "GETFAMILY" , }, { .c_id = CTRL_CMD_NEWOPS, .c_name = "NEWOPS" , }, { .c_id = CTRL_CMD_DELOPS, .c_name = "DELOPS" , }, }; static struct genl_ops genl_ops = { .o_cmds = genl_cmds, .o_ncmds = ARRAY_SIZE(genl_cmds), }; /** @cond SKIP */ extern struct nl_object_ops genl_family_ops; /** @endcond */ static struct nl_cache_ops genl_ctrl_ops = { .co_name = "genl/family", .co_hdrsize = GENL_HDRSIZE(0), .co_msgtypes = GENL_FAMILY(GENL_ID_CTRL, "nlctrl"), .co_genl = &genl_ops, .co_protocol = NETLINK_GENERIC, .co_request_update = ctrl_request_update, .co_obj_ops = &genl_family_ops, }; static void __init ctrl_init(void) { genl_register(&genl_ctrl_ops); } static void __exit ctrl_exit(void) { genl_unregister(&genl_ctrl_ops); } /** @} */
gpl-2.0
sagigrimberg/linux
lib/nlattr.c
919
12832
/* * NETLINK Netlink attributes * * Authors: Thomas Graf <tgraf@suug.ch> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> #include <net/netlink.h> static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_U8] = sizeof(u8), [NLA_U16] = sizeof(u16), [NLA_U32] = sizeof(u32), [NLA_U64] = sizeof(u64), [NLA_MSECS] = sizeof(u64), [NLA_NESTED] = NLA_HDRLEN, [NLA_S8] = sizeof(s8), [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), }; static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy) { const struct nla_policy *pt; int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); if (type <= 0 || type > maxtype) return 0; pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); switch (pt->type) { case NLA_FLAG: if (attrlen > 0) return -ERANGE; break; case NLA_NUL_STRING: if (pt->len) minlen = min_t(int, attrlen, pt->len + 1); else minlen = attrlen; if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) return -EINVAL; /* fall through */ case NLA_STRING: if (attrlen < 1) return -ERANGE; if (pt->len) { char *buf = nla_data(nla); if (buf[attrlen - 1] == '\0') attrlen--; if (attrlen > pt->len) return -ERANGE; } break; case NLA_BINARY: if (pt->len && attrlen > pt->len) return -ERANGE; break; case NLA_NESTED_COMPAT: if (attrlen < pt->len) return -ERANGE; if (attrlen < NLA_ALIGN(pt->len)) break; if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN) return -ERANGE; nla = nla_data(nla) + NLA_ALIGN(pt->len); if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla)) return -ERANGE; break; case NLA_NESTED: /* a nested attributes is allowed to be empty; if its not, * it must have a size of at least NLA_HDRLEN. 
*/ if (attrlen == 0) break; default: if (pt->len) minlen = pt->len; else if (pt->type != NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (attrlen < minlen) return -ERANGE; } return 0; } /** * nla_validate - Validate a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @maxtype: maximum attribute type to be expected * @policy: validation policy * * Validates all attributes in the specified attribute stream against the * specified policy. Attributes with a type exceeding maxtype will be * ignored. See documenation of struct nla_policy for more details. * * Returns 0 on success or a negative error code. */ int nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy) { const struct nlattr *nla; int rem, err; nla_for_each_attr(nla, head, len, rem) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } err = 0; errout: return err; } EXPORT_SYMBOL(nla_validate); /** * nla_policy_len - Determin the max. length of a policy * @policy: policy to use * @n: number of policies * * Determines the max. length of the policy. It is currently used * to allocated Netlink buffers roughly the size of the actual * message. * * Returns 0 on success or a negative error code. */ int nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; for (i = 0; i < n; i++, p++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_minlen[p->type]) len += nla_total_size(nla_attr_minlen[p->type]); } return len; } EXPORT_SYMBOL(nla_policy_len); /** * nla_parse - Parse a stream of attributes into a tb buffer * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @head: head of attribute stream * @len: length of attribute stream * @policy: validation policy * * Parses a stream of attributes and stores a pointer to each attribute in * the tb array accessible via the attribute type. 
Attributes with a type * exceeding maxtype will be silently ignored for backwards compatibility * reasons. policy may be set to NULL if no validation is required. * * Returns 0 on success or a negative error code. */ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy) { const struct nlattr *nla; int rem, err; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { u16 type = nla_type(nla); if (type > 0 && type <= maxtype) { if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } tb[type] = (struct nlattr *)nla; } } if (unlikely(rem > 0)) pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n", rem, current->comm); err = 0; errout: return err; } EXPORT_SYMBOL(nla_parse); /** * nla_find - Find a specific attribute in a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @attrtype: type of attribute to look for * * Returns the first attribute in the stream matching the specified type. */ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype) { const struct nlattr *nla; int rem; nla_for_each_attr(nla, head, len, rem) if (nla_type(nla) == attrtype) return (struct nlattr *)nla; return NULL; } EXPORT_SYMBOL(nla_find); /** * nla_strlcpy - Copy string attribute payload into a sized buffer * @dst: where to copy the string to * @nla: attribute to copy the string from * @dstsize: size of destination buffer * * Copies at most dstsize - 1 bytes into the destination buffer. * The result is always a valid NUL-terminated string. Unlike * strlcpy the destination buffer is always padded out. * * Returns the length of the source buffer. 
*/ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize) { size_t srclen = nla_len(nla); char *src = nla_data(nla); if (srclen > 0 && src[srclen - 1] == '\0') srclen--; if (dstsize > 0) { size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen; memset(dst, 0, dstsize); memcpy(dst, src, len); } return srclen; } EXPORT_SYMBOL(nla_strlcpy); /** * nla_memcpy - Copy a netlink attribute into another memory area * @dest: where to copy to memcpy * @src: netlink attribute to copy from * @count: size of the destination area * * Note: The number of bytes copied is limited by the length of * attribute's payload. memcpy * * Returns the number of bytes copied. */ int nla_memcpy(void *dest, const struct nlattr *src, int count) { int minlen = min_t(int, count, nla_len(src)); memcpy(dest, nla_data(src), minlen); if (count > minlen) memset(dest + minlen, 0, count - minlen); return minlen; } EXPORT_SYMBOL(nla_memcpy); /** * nla_memcmp - Compare an attribute with sized memory area * @nla: netlink attribute * @data: memory area * @size: size of memory area */ int nla_memcmp(const struct nlattr *nla, const void *data, size_t size) { int d = nla_len(nla) - size; if (d == 0) d = memcmp(nla_data(nla), data, size); return d; } EXPORT_SYMBOL(nla_memcmp); /** * nla_strcmp - Compare a string attribute against a string * @nla: netlink string attribute * @str: another string */ int nla_strcmp(const struct nlattr *nla, const char *str) { int len = strlen(str); char *buf = nla_data(nla); int attrlen = nla_len(nla); int d; if (attrlen > 0 && buf[attrlen - 1] == '\0') attrlen--; d = attrlen - len; if (d == 0) d = memcmp(nla_data(nla), str, len); return d; } EXPORT_SYMBOL(nla_strcmp); #ifdef CONFIG_NET /** * __nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. 
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the attribute header and payload.
 */
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
	struct nlattr *nla;

	nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
	nla->nla_type = attrtype;
	nla->nla_len = nla_attr_size(attrlen);

	/* zero the alignment padding after the payload */
	memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));

	return nla;
}
EXPORT_SYMBOL(__nla_reserve);

/**
 * __nla_reserve_nohdr - reserve room for attribute without header
 * @skb: socket buffer to reserve room on
 * @attrlen: length of attribute payload
 *
 * Reserves room for attribute payload without a header.
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the payload.
 */
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
	void *start;

	start = skb_put(skb, NLA_ALIGN(attrlen));
	memset(start, 0, NLA_ALIGN(attrlen));

	return start;
}
EXPORT_SYMBOL(__nla_reserve_nohdr);

/**
 * nla_reserve - reserve room for attribute on the skb
 * @skb: socket buffer to reserve room on
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 *
 * Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it.
 *
 * Returns NULL if the tailroom of the skb is insufficient to store
 * the attribute header and payload.
 */
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
		return NULL;

	return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(nla_reserve);

/**
 * nla_reserve_nohdr - reserve room for attribute without header
 * @skb: socket buffer to reserve room on
 * @attrlen: length of attribute payload
 *
 * Reserves room for attribute payload without a header.
 *
 * Returns NULL if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return NULL;

	return __nla_reserve_nohdr(skb, attrlen);
}
EXPORT_SYMBOL(nla_reserve_nohdr);

/**
 * __nla_put - Add a netlink attribute to a socket buffer
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the attribute header and payload.
 */
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
	       const void *data)
{
	struct nlattr *nla;

	nla = __nla_reserve(skb, attrtype, attrlen);
	memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put);

/**
 * __nla_put_nohdr - Add a netlink attribute without header
 * @skb: socket buffer to add attribute to
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the attribute payload.
 */
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
	void *start;

	start = __nla_reserve_nohdr(skb, attrlen);
	memcpy(start, data, attrlen);
}
EXPORT_SYMBOL(__nla_put_nohdr);

/**
 * nla_put - Add a netlink attribute to a socket buffer
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute header and payload.
 */
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
		return -EMSGSIZE;

	__nla_put(skb, attrtype, attrlen, data);
	return 0;
}
EXPORT_SYMBOL(nla_put);

/**
 * nla_put_nohdr - Add a netlink attribute without header
 * @skb: socket buffer to add attribute to
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return -EMSGSIZE;

	__nla_put_nohdr(skb, attrlen, data);
	return 0;
}
EXPORT_SYMBOL(nla_put_nohdr);

/**
 * nla_append - Add a netlink attribute without header or padding
 * @skb: socket buffer to add attribute to
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
int nla_append(struct sk_buff *skb, int attrlen, const void *data)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return -EMSGSIZE;

	memcpy(skb_put(skb, attrlen), data, attrlen);
	return 0;
}
EXPORT_SYMBOL(nla_append);
#endif
gpl-2.0
uniquejainakshay/Linux_Kernel
drivers/media/dvb-frontends/ds3000.c
1687
26319
/* Montage Technology DS3000 - DVBS/S2 Demodulator driver Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com> Copyright (C) 2009-2012 TurboSight.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/firmware.h> #include "dvb_frontend.h" #include "ts2020.h" #include "ds3000.h" static int debug; #define dprintk(args...) 
\ do { \ if (debug) \ printk(args); \ } while (0) /* as of March 2009 current DS3000 firmware version is 1.78 */ /* DS3000 FW v1.78 MD5: a32d17910c4f370073f9346e71d34b80 */ #define DS3000_DEFAULT_FIRMWARE "dvb-fe-ds3000.fw" #define DS3000_SAMPLE_RATE 96000 /* in kHz */ /* Register values to initialise the demod in DVB-S mode */ static u8 ds3000_dvbs_init_tab[] = { 0x23, 0x05, 0x08, 0x03, 0x0c, 0x00, 0x21, 0x54, 0x25, 0x82, 0x27, 0x31, 0x30, 0x08, 0x31, 0x40, 0x32, 0x32, 0x33, 0x35, 0x35, 0xff, 0x3a, 0x00, 0x37, 0x10, 0x38, 0x10, 0x39, 0x02, 0x42, 0x60, 0x4a, 0x40, 0x4b, 0x04, 0x4d, 0x91, 0x5d, 0xc8, 0x50, 0x77, 0x51, 0x77, 0x52, 0x36, 0x53, 0x36, 0x56, 0x01, 0x63, 0x43, 0x64, 0x30, 0x65, 0x40, 0x68, 0x26, 0x69, 0x4c, 0x70, 0x20, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x40, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x60, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x80, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0xa0, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x1f, 0x76, 0x00, 0x77, 0xd1, 0x78, 0x0c, 0x79, 0x80, 0x7f, 0x04, 0x7c, 0x00, 0x80, 0x86, 0x81, 0xa6, 0x85, 0x04, 0xcd, 0xf4, 0x90, 0x33, 0xa0, 0x44, 0xc0, 0x18, 0xc3, 0x10, 0xc4, 0x08, 0xc5, 0x80, 0xc6, 0x80, 0xc7, 0x0a, 0xc8, 0x1a, 0xc9, 0x80, 0xfe, 0x92, 0xe0, 0xf8, 0xe6, 0x8b, 0xd0, 0x40, 0xf8, 0x20, 0xfa, 0x0f, 0xfd, 0x20, 0xad, 0x20, 0xae, 0x07, 0xb8, 0x00, }; /* Register values to initialise the demod in DVB-S2 mode */ static u8 ds3000_dvbs2_init_tab[] = { 0x23, 0x0f, 0x08, 0x07, 0x0c, 0x00, 0x21, 0x54, 0x25, 0x82, 0x27, 0x31, 0x30, 0x08, 0x31, 0x32, 0x32, 0x32, 0x33, 0x35, 0x35, 0xff, 0x3a, 0x00, 0x37, 0x10, 0x38, 0x10, 0x39, 0x02, 0x42, 0x60, 0x4a, 0x80, 0x4b, 0x04, 0x4d, 0x81, 0x5d, 0x88, 0x50, 0x36, 0x51, 0x36, 0x52, 0x36, 0x53, 0x36, 0x63, 0x60, 0x64, 0x10, 0x65, 0x10, 0x68, 0x04, 0x69, 0x29, 0x70, 0x20, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x40, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x60, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x80, 0x71, 0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0xa0, 0x71, 
0x70, 0x72, 0x04, 0x73, 0x00, 0x70, 0x1f, 0xa0, 0x44, 0xc0, 0x08, 0xc1, 0x10, 0xc2, 0x08, 0xc3, 0x10, 0xc4, 0x08, 0xc5, 0xf0, 0xc6, 0xf0, 0xc7, 0x0a, 0xc8, 0x1a, 0xc9, 0x80, 0xca, 0x23, 0xcb, 0x24, 0xce, 0x74, 0x90, 0x03, 0x76, 0x80, 0x77, 0x42, 0x78, 0x0a, 0x79, 0x80, 0xad, 0x40, 0xae, 0x07, 0x7f, 0xd4, 0x7c, 0x00, 0x80, 0xa8, 0x81, 0xda, 0x7c, 0x01, 0x80, 0xda, 0x81, 0xec, 0x7c, 0x02, 0x80, 0xca, 0x81, 0xeb, 0x7c, 0x03, 0x80, 0xba, 0x81, 0xdb, 0x85, 0x08, 0x86, 0x00, 0x87, 0x02, 0x89, 0x80, 0x8b, 0x44, 0x8c, 0xaa, 0x8a, 0x10, 0xba, 0x00, 0xf5, 0x04, 0xfe, 0x44, 0xd2, 0x32, 0xb8, 0x00, }; struct ds3000_state { struct i2c_adapter *i2c; const struct ds3000_config *config; struct dvb_frontend frontend; /* previous uncorrected block counter for DVB-S2 */ u16 prevUCBS2; }; static int ds3000_writereg(struct ds3000_state *state, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; int err; dprintk("%s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data); err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x," " value == 0x%02x)\n", __func__, err, reg, data); return -EREMOTEIO; } return 0; } static int ds3000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct ds3000_state *state = fe->demodulator_priv; if (enable) ds3000_writereg(state, 0x03, 0x12); else ds3000_writereg(state, 0x03, 0x02); return 0; } /* I2C write for 8k firmware load */ static int ds3000_writeFW(struct ds3000_state *state, int reg, const u8 *data, u16 len) { int i, ret = 0; struct i2c_msg msg; u8 *buf; buf = kmalloc(33, GFP_KERNEL); if (buf == NULL) { printk(KERN_ERR "Unable to kmalloc\n"); return -ENOMEM; } *(buf) = reg; msg.addr = state->config->demod_address; msg.flags = 0; msg.buf = buf; msg.len = 33; for (i = 0; i < len; i += 32) { memcpy(buf + 1, data + i, 32); dprintk("%s: write reg 0x%02x, len = %d\n", __func__, reg, len); ret = 
i2c_transfer(state->i2c, &msg, 1); if (ret != 1) { printk(KERN_ERR "%s: write error(err == %i, " "reg == 0x%02x\n", __func__, ret, reg); ret = -EREMOTEIO; goto error; } } ret = 0; error: kfree(buf); return ret; } static int ds3000_readreg(struct ds3000_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_ERR "%s: reg=0x%x(error=%d)\n", __func__, reg, ret); return ret; } dprintk("%s: read reg 0x%02x, value 0x%02x\n", __func__, reg, b1[0]); return b1[0]; } static int ds3000_load_firmware(struct dvb_frontend *fe, const struct firmware *fw); static int ds3000_firmware_ondemand(struct dvb_frontend *fe) { struct ds3000_state *state = fe->demodulator_priv; const struct firmware *fw; int ret = 0; dprintk("%s()\n", __func__); ret = ds3000_readreg(state, 0xb2); if (ret < 0) return ret; /* Load firmware */ /* request the firmware, this will block until someone uploads it */ printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__, DS3000_DEFAULT_FIRMWARE); ret = request_firmware(&fw, DS3000_DEFAULT_FIRMWARE, state->i2c->dev.parent); printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__); if (ret) { printk(KERN_ERR "%s: No firmware uploaded (timeout or file not " "found?)\n", __func__); return ret; } ret = ds3000_load_firmware(fe, fw); if (ret) printk("%s: Writing firmware to device failed\n", __func__); release_firmware(fw); dprintk("%s: Firmware upload %s\n", __func__, ret == 0 ? "complete" : "failed"); return ret; } static int ds3000_load_firmware(struct dvb_frontend *fe, const struct firmware *fw) { struct ds3000_state *state = fe->demodulator_priv; int ret = 0; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes (%02x %02x .. 
%02x %02x)\n", fw->size, fw->data[0], fw->data[1], fw->data[fw->size - 2], fw->data[fw->size - 1]); /* Begin the firmware load process */ ds3000_writereg(state, 0xb2, 0x01); /* write the entire firmware */ ret = ds3000_writeFW(state, 0xb0, fw->data, fw->size); ds3000_writereg(state, 0xb2, 0x00); return ret; } static int ds3000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct ds3000_state *state = fe->demodulator_priv; u8 data; dprintk("%s(%d)\n", __func__, voltage); data = ds3000_readreg(state, 0xa2); data |= 0x03; /* bit0 V/H, bit1 off/on */ switch (voltage) { case SEC_VOLTAGE_18: data &= ~0x03; break; case SEC_VOLTAGE_13: data &= ~0x03; data |= 0x01; break; case SEC_VOLTAGE_OFF: break; } ds3000_writereg(state, 0xa2, data); return 0; } static int ds3000_read_status(struct dvb_frontend *fe, fe_status_t* status) { struct ds3000_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int lock; *status = 0; switch (c->delivery_system) { case SYS_DVBS: lock = ds3000_readreg(state, 0xd1); if ((lock & 0x07) == 0x07) *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; break; case SYS_DVBS2: lock = ds3000_readreg(state, 0x0d); if ((lock & 0x8f) == 0x8f) *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; break; default: return 1; } if (state->config->set_lock_led) state->config->set_lock_led(fe, *status == 0 ? 
0 : 1); dprintk("%s: status = 0x%02x\n", __func__, lock); return 0; } /* read DS3000 BER value */ static int ds3000_read_ber(struct dvb_frontend *fe, u32* ber) { struct ds3000_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 data; u32 ber_reading, lpdc_frames; dprintk("%s()\n", __func__); switch (c->delivery_system) { case SYS_DVBS: /* set the number of bytes checked during BER estimation */ ds3000_writereg(state, 0xf9, 0x04); /* read BER estimation status */ data = ds3000_readreg(state, 0xf8); /* check if BER estimation is ready */ if ((data & 0x10) == 0) { /* this is the number of error bits, to calculate the bit error rate divide to 8388608 */ *ber = (ds3000_readreg(state, 0xf7) << 8) | ds3000_readreg(state, 0xf6); /* start counting error bits */ /* need to be set twice otherwise it fails sometimes */ data |= 0x10; ds3000_writereg(state, 0xf8, data); ds3000_writereg(state, 0xf8, data); } else /* used to indicate that BER estimation is not ready, i.e. BER is unknown */ *ber = 0xffffffff; break; case SYS_DVBS2: /* read the number of LPDC decoded frames */ lpdc_frames = (ds3000_readreg(state, 0xd7) << 16) | (ds3000_readreg(state, 0xd6) << 8) | ds3000_readreg(state, 0xd5); /* read the number of packets with bad CRC */ ber_reading = (ds3000_readreg(state, 0xf8) << 8) | ds3000_readreg(state, 0xf7); if (lpdc_frames > 750) { /* clear LPDC frame counters */ ds3000_writereg(state, 0xd1, 0x01); /* clear bad packets counter */ ds3000_writereg(state, 0xf9, 0x01); /* enable bad packets counter */ ds3000_writereg(state, 0xf9, 0x00); /* enable LPDC frame counters */ ds3000_writereg(state, 0xd1, 0x00); *ber = ber_reading; } else /* used to indicate that BER estimation is not ready, i.e. 
BER is unknown */ *ber = 0xffffffff; break; default: return 1; } return 0; } static int ds3000_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { if (fe->ops.tuner_ops.get_rf_strength) fe->ops.tuner_ops.get_rf_strength(fe, signal_strength); return 0; } /* calculate DS3000 snr value in dB */ static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr) { struct ds3000_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 snr_reading, snr_value; u32 dvbs2_signal_reading, dvbs2_noise_reading, tmp; static const u16 dvbs_snr_tab[] = { /* 20 x Table (rounded up) */ 0x0000, 0x1b13, 0x2aea, 0x3627, 0x3ede, 0x45fe, 0x4c03, 0x513a, 0x55d4, 0x59f2, 0x5dab, 0x6111, 0x6431, 0x6717, 0x69c9, 0x6c4e, 0x6eac, 0x70e8, 0x7304, 0x7505 }; static const u16 dvbs2_snr_tab[] = { /* 80 x Table (rounded up) */ 0x0000, 0x0bc2, 0x12a3, 0x1785, 0x1b4e, 0x1e65, 0x2103, 0x2347, 0x2546, 0x2710, 0x28ae, 0x2a28, 0x2b83, 0x2cc5, 0x2df1, 0x2f09, 0x3010, 0x3109, 0x31f4, 0x32d2, 0x33a6, 0x3470, 0x3531, 0x35ea, 0x369b, 0x3746, 0x37ea, 0x3888, 0x3920, 0x39b3, 0x3a42, 0x3acc, 0x3b51, 0x3bd3, 0x3c51, 0x3ccb, 0x3d42, 0x3db6, 0x3e27, 0x3e95, 0x3f00, 0x3f68, 0x3fcf, 0x4033, 0x4094, 0x40f4, 0x4151, 0x41ac, 0x4206, 0x425e, 0x42b4, 0x4308, 0x435b, 0x43ac, 0x43fc, 0x444a, 0x4497, 0x44e2, 0x452d, 0x4576, 0x45bd, 0x4604, 0x4649, 0x468e, 0x46d1, 0x4713, 0x4755, 0x4795, 0x47d4, 0x4813, 0x4851, 0x488d, 0x48c9, 0x4904, 0x493f, 0x4978, 0x49b1, 0x49e9, 0x4a20, 0x4a57 }; dprintk("%s()\n", __func__); switch (c->delivery_system) { case SYS_DVBS: snr_reading = ds3000_readreg(state, 0xff); snr_reading /= 8; if (snr_reading == 0) *snr = 0x0000; else { if (snr_reading > 20) snr_reading = 20; snr_value = dvbs_snr_tab[snr_reading - 1] * 10 / 23026; /* cook the value to be suitable for szap-s2 human readable output */ *snr = snr_value * 8 * 655; } dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__, snr_reading, *snr); break; case SYS_DVBS2: dvbs2_noise_reading 
= (ds3000_readreg(state, 0x8c) & 0x3f) + (ds3000_readreg(state, 0x8d) << 4); dvbs2_signal_reading = ds3000_readreg(state, 0x8e); tmp = dvbs2_signal_reading * dvbs2_signal_reading >> 1; if (tmp == 0) { *snr = 0x0000; return 0; } if (dvbs2_noise_reading == 0) { snr_value = 0x0013; /* cook the value to be suitable for szap-s2 human readable output */ *snr = 0xffff; return 0; } if (tmp > dvbs2_noise_reading) { snr_reading = tmp / dvbs2_noise_reading; if (snr_reading > 80) snr_reading = 80; snr_value = dvbs2_snr_tab[snr_reading - 1] / 1000; /* cook the value to be suitable for szap-s2 human readable output */ *snr = snr_value * 5 * 655; } else { snr_reading = dvbs2_noise_reading / tmp; if (snr_reading > 80) snr_reading = 80; *snr = -(dvbs2_snr_tab[snr_reading] / 1000); } dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__, snr_reading, *snr); break; default: return 1; } return 0; } /* read DS3000 uncorrected blocks */ static int ds3000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct ds3000_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 data; u16 _ucblocks; dprintk("%s()\n", __func__); switch (c->delivery_system) { case SYS_DVBS: *ucblocks = (ds3000_readreg(state, 0xf5) << 8) | ds3000_readreg(state, 0xf4); data = ds3000_readreg(state, 0xf8); /* clear packet counters */ data &= ~0x20; ds3000_writereg(state, 0xf8, data); /* enable packet counters */ data |= 0x20; ds3000_writereg(state, 0xf8, data); break; case SYS_DVBS2: _ucblocks = (ds3000_readreg(state, 0xe2) << 8) | ds3000_readreg(state, 0xe1); if (_ucblocks > state->prevUCBS2) *ucblocks = _ucblocks - state->prevUCBS2; else *ucblocks = state->prevUCBS2 - _ucblocks; state->prevUCBS2 = _ucblocks; break; default: return 1; } return 0; } static int ds3000_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct ds3000_state *state = fe->demodulator_priv; u8 data; dprintk("%s(%d)\n", __func__, tone); if ((tone != SEC_TONE_ON) && (tone != 
SEC_TONE_OFF)) { printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone); return -EINVAL; } data = ds3000_readreg(state, 0xa2); data &= ~0xc0; ds3000_writereg(state, 0xa2, data); switch (tone) { case SEC_TONE_ON: dprintk("%s: setting tone on\n", __func__); data = ds3000_readreg(state, 0xa1); data &= ~0x43; data |= 0x04; ds3000_writereg(state, 0xa1, data); break; case SEC_TONE_OFF: dprintk("%s: setting tone off\n", __func__); data = ds3000_readreg(state, 0xa2); data |= 0x80; ds3000_writereg(state, 0xa2, data); break; } return 0; } static int ds3000_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d) { struct ds3000_state *state = fe->demodulator_priv; int i; u8 data; /* Dump DiSEqC message */ dprintk("%s(", __func__); for (i = 0 ; i < d->msg_len;) { dprintk("0x%02x", d->msg[i]); if (++i < d->msg_len) dprintk(", "); } /* enable DiSEqC message send pin */ data = ds3000_readreg(state, 0xa2); data &= ~0xc0; ds3000_writereg(state, 0xa2, data); /* DiSEqC message */ for (i = 0; i < d->msg_len; i++) ds3000_writereg(state, 0xa3 + i, d->msg[i]); data = ds3000_readreg(state, 0xa1); /* clear DiSEqC message length and status, enable DiSEqC message send */ data &= ~0xf8; /* set DiSEqC mode, modulation active during 33 pulses, set DiSEqC message length */ data |= ((d->msg_len - 1) << 3) | 0x07; ds3000_writereg(state, 0xa1, data); /* wait up to 150ms for DiSEqC transmission to complete */ for (i = 0; i < 15; i++) { data = ds3000_readreg(state, 0xa1); if ((data & 0x40) == 0) break; msleep(10); } /* DiSEqC timeout after 150ms */ if (i == 15) { data = ds3000_readreg(state, 0xa1); data &= ~0x80; data |= 0x40; ds3000_writereg(state, 0xa1, data); data = ds3000_readreg(state, 0xa2); data &= ~0xc0; data |= 0x80; ds3000_writereg(state, 0xa2, data); return 1; } data = ds3000_readreg(state, 0xa2); data &= ~0xc0; data |= 0x80; ds3000_writereg(state, 0xa2, data); return 0; } /* Send DiSEqC burst */ static int ds3000_diseqc_send_burst(struct dvb_frontend *fe, 
fe_sec_mini_cmd_t burst) { struct ds3000_state *state = fe->demodulator_priv; int i; u8 data; dprintk("%s()\n", __func__); data = ds3000_readreg(state, 0xa2); data &= ~0xc0; ds3000_writereg(state, 0xa2, data); /* DiSEqC burst */ if (burst == SEC_MINI_A) /* Unmodulated tone burst */ ds3000_writereg(state, 0xa1, 0x02); else if (burst == SEC_MINI_B) /* Modulated tone burst */ ds3000_writereg(state, 0xa1, 0x01); else return -EINVAL; msleep(13); for (i = 0; i < 5; i++) { data = ds3000_readreg(state, 0xa1); if ((data & 0x40) == 0) break; msleep(1); } if (i == 5) { data = ds3000_readreg(state, 0xa1); data &= ~0x80; data |= 0x40; ds3000_writereg(state, 0xa1, data); data = ds3000_readreg(state, 0xa2); data &= ~0xc0; data |= 0x80; ds3000_writereg(state, 0xa2, data); return 1; } data = ds3000_readreg(state, 0xa2); data &= ~0xc0; data |= 0x80; ds3000_writereg(state, 0xa2, data); return 0; } static void ds3000_release(struct dvb_frontend *fe) { struct ds3000_state *state = fe->demodulator_priv; if (state->config->set_lock_led) state->config->set_lock_led(fe, 0); dprintk("%s\n", __func__); kfree(state); } static struct dvb_frontend_ops ds3000_ops; struct dvb_frontend *ds3000_attach(const struct ds3000_config *config, struct i2c_adapter *i2c) { struct ds3000_state *state = NULL; int ret; dprintk("%s\n", __func__); /* allocate memory for the internal state */ state = kzalloc(sizeof(struct ds3000_state), GFP_KERNEL); if (state == NULL) { printk(KERN_ERR "Unable to kmalloc\n"); goto error2; } state->config = config; state->i2c = i2c; state->prevUCBS2 = 0; /* check if the demod is present */ ret = ds3000_readreg(state, 0x00) & 0xfe; if (ret != 0xe0) { printk(KERN_ERR "Invalid probe, probably not a DS3000\n"); goto error3; } printk(KERN_INFO "DS3000 chip version: %d.%d attached.\n", ds3000_readreg(state, 0x02), ds3000_readreg(state, 0x01)); memcpy(&state->frontend.ops, &ds3000_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; 
error3: kfree(state); error2: return NULL; } EXPORT_SYMBOL(ds3000_attach); static int ds3000_set_carrier_offset(struct dvb_frontend *fe, s32 carrier_offset_khz) { struct ds3000_state *state = fe->demodulator_priv; s32 tmp; tmp = carrier_offset_khz; tmp *= 65536; tmp = (2 * tmp + DS3000_SAMPLE_RATE) / (2 * DS3000_SAMPLE_RATE); if (tmp < 0) tmp += 65536; ds3000_writereg(state, 0x5f, tmp >> 8); ds3000_writereg(state, 0x5e, tmp & 0xff); return 0; } static int ds3000_set_frontend(struct dvb_frontend *fe) { struct ds3000_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int i; fe_status_t status; s32 offset_khz; u32 frequency; u16 value; dprintk("%s() ", __func__); if (state->config->set_ts_params) state->config->set_ts_params(fe, 0); /* Tune */ if (fe->ops.tuner_ops.set_params) fe->ops.tuner_ops.set_params(fe); /* ds3000 global reset */ ds3000_writereg(state, 0x07, 0x80); ds3000_writereg(state, 0x07, 0x00); /* ds3000 build-in uC reset */ ds3000_writereg(state, 0xb2, 0x01); /* ds3000 software reset */ ds3000_writereg(state, 0x00, 0x01); switch (c->delivery_system) { case SYS_DVBS: /* initialise the demod in DVB-S mode */ for (i = 0; i < sizeof(ds3000_dvbs_init_tab); i += 2) ds3000_writereg(state, ds3000_dvbs_init_tab[i], ds3000_dvbs_init_tab[i + 1]); value = ds3000_readreg(state, 0xfe); value &= 0xc0; value |= 0x1b; ds3000_writereg(state, 0xfe, value); break; case SYS_DVBS2: /* initialise the demod in DVB-S2 mode */ for (i = 0; i < sizeof(ds3000_dvbs2_init_tab); i += 2) ds3000_writereg(state, ds3000_dvbs2_init_tab[i], ds3000_dvbs2_init_tab[i + 1]); if (c->symbol_rate >= 30000000) ds3000_writereg(state, 0xfe, 0x54); else ds3000_writereg(state, 0xfe, 0x98); break; default: return 1; } /* enable 27MHz clock output */ ds3000_writereg(state, 0x29, 0x80); /* enable ac coupling */ ds3000_writereg(state, 0x25, 0x8a); /* enhance symbol rate performance */ if ((c->symbol_rate / 1000) <= 5000) { value = 29777 / (c->symbol_rate / 1000) 
+ 1; if (value % 2 != 0) value++; ds3000_writereg(state, 0xc3, 0x0d); ds3000_writereg(state, 0xc8, value); ds3000_writereg(state, 0xc4, 0x10); ds3000_writereg(state, 0xc7, 0x0e); } else if ((c->symbol_rate / 1000) <= 10000) { value = 92166 / (c->symbol_rate / 1000) + 1; if (value % 2 != 0) value++; ds3000_writereg(state, 0xc3, 0x07); ds3000_writereg(state, 0xc8, value); ds3000_writereg(state, 0xc4, 0x09); ds3000_writereg(state, 0xc7, 0x12); } else if ((c->symbol_rate / 1000) <= 20000) { value = 64516 / (c->symbol_rate / 1000) + 1; ds3000_writereg(state, 0xc3, value); ds3000_writereg(state, 0xc8, 0x0e); ds3000_writereg(state, 0xc4, 0x07); ds3000_writereg(state, 0xc7, 0x18); } else { value = 129032 / (c->symbol_rate / 1000) + 1; ds3000_writereg(state, 0xc3, value); ds3000_writereg(state, 0xc8, 0x0a); ds3000_writereg(state, 0xc4, 0x05); ds3000_writereg(state, 0xc7, 0x24); } /* normalized symbol rate rounded to the closest integer */ value = (((c->symbol_rate / 1000) << 16) + (DS3000_SAMPLE_RATE / 2)) / DS3000_SAMPLE_RATE; ds3000_writereg(state, 0x61, value & 0x00ff); ds3000_writereg(state, 0x62, (value & 0xff00) >> 8); /* co-channel interference cancellation disabled */ ds3000_writereg(state, 0x56, 0x00); /* equalizer disabled */ ds3000_writereg(state, 0x76, 0x00); /*ds3000_writereg(state, 0x08, 0x03); ds3000_writereg(state, 0xfd, 0x22); ds3000_writereg(state, 0x08, 0x07); ds3000_writereg(state, 0xfd, 0x42); ds3000_writereg(state, 0x08, 0x07);*/ if (state->config->ci_mode) { switch (c->delivery_system) { case SYS_DVBS: default: ds3000_writereg(state, 0xfd, 0x80); break; case SYS_DVBS2: ds3000_writereg(state, 0xfd, 0x01); break; } } /* ds3000 out of software reset */ ds3000_writereg(state, 0x00, 0x00); /* start ds3000 build-in uC */ ds3000_writereg(state, 0xb2, 0x00); if (fe->ops.tuner_ops.get_frequency) { fe->ops.tuner_ops.get_frequency(fe, &frequency); offset_khz = frequency - c->frequency; ds3000_set_carrier_offset(fe, offset_khz); } for (i = 0; i < 30 ; i++) { 
ds3000_read_status(fe, &status); if (status & FE_HAS_LOCK) break; msleep(10); } return 0; } static int ds3000_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, unsigned int *delay, fe_status_t *status) { if (re_tune) { int ret = ds3000_set_frontend(fe); if (ret) return ret; } *delay = HZ / 5; return ds3000_read_status(fe, status); } static enum dvbfe_algo ds3000_get_algo(struct dvb_frontend *fe) { struct ds3000_state *state = fe->demodulator_priv; if (state->config->set_lock_led) state->config->set_lock_led(fe, 0); dprintk("%s()\n", __func__); return DVBFE_ALGO_HW; } /* * Initialise or wake up device * * Power config will reset and load initial firmware if required */ static int ds3000_initfe(struct dvb_frontend *fe) { struct ds3000_state *state = fe->demodulator_priv; int ret; dprintk("%s()\n", __func__); /* hard reset */ ds3000_writereg(state, 0x08, 0x01 | ds3000_readreg(state, 0x08)); msleep(1); /* Load the firmware if required */ ret = ds3000_firmware_ondemand(fe); if (ret != 0) { printk(KERN_ERR "%s: Unable initialize firmware\n", __func__); return ret; } return 0; } static struct dvb_frontend_ops ds3000_ops = { .delsys = { SYS_DVBS, SYS_DVBS2 }, .info = { .name = "Montage Technology DS3000", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 1011, /* kHz for QPSK frontends */ .frequency_tolerance = 5000, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_2G_MODULATION | FE_CAN_QPSK | FE_CAN_RECOVER }, .release = ds3000_release, .init = ds3000_initfe, .i2c_gate_ctrl = ds3000_i2c_gate_ctrl, .read_status = ds3000_read_status, .read_ber = ds3000_read_ber, .read_signal_strength = ds3000_read_signal_strength, .read_snr = ds3000_read_snr, .read_ucblocks = ds3000_read_ucblocks, .set_voltage = ds3000_set_voltage, .set_tone = ds3000_set_tone, 
.diseqc_send_master_cmd = ds3000_send_diseqc_msg, .diseqc_send_burst = ds3000_diseqc_send_burst, .get_frontend_algo = ds3000_get_algo, .set_frontend = ds3000_set_frontend, .tune = ds3000_tune, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); MODULE_DESCRIPTION("DVB Frontend module for Montage Technology " "DS3000 hardware"); MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
gpl-2.0
narantech/linux-pc64
arch/arm/mach-omap1/pm.c
2199
19343
/* * linux/arch/arm/mach-omap1/pm.c * * OMAP Power Management Routines * * Original code for the SA11x0: * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com> * * Modified for the PXA250 by Nicolas Pitre: * Copyright (c) 2002 Monta Vista Software, Inc. * * Modified for the OMAP1510 by David Singleton: * Copyright (c) 2002 Monta Vista Software, Inc. * * Cleanup 2004 for OMAP1510/1610 by Dirk Behme <dirk.behme@de.bosch.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/suspend.h> #include <linux/sched.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/io.h> #include <linux/atomic.h> #include <linux/cpu.h> #include <asm/fncpy.h> #include <asm/system_misc.h> #include <asm/irq.h> #include <asm/mach/time.h> #include <asm/mach/irq.h> #include <mach/tc.h> #include <mach/mux.h> #include <linux/omap-dma.h> #include <plat/dmtimer.h> #include <mach/irqs.h> #include "iomap.h" #include "clock.h" #include "pm.h" #include "sram.h" static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE]; static unsigned short dsp_sleep_save[DSP_SLEEP_SAVE_SIZE]; static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE]; static unsigned int mpui7xx_sleep_save[MPUI7XX_SLEEP_SAVE_SIZE]; static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE]; static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE]; #ifdef CONFIG_OMAP_32K_TIMER static unsigned short enable_dyn_sleep = 1; static ssize_t idle_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%hu\n", enable_dyn_sleep); } static ssize_t idle_store(struct kobject *kobj, struct kobj_attribute *attr, const char * buf, size_t n) { unsigned short value; if (sscanf(buf, "%hu", &value) != 1 || (value != 0 && value != 1)) { printk(KERN_ERR "idle_sleep_store: Invalid value\n"); return -EINVAL; } enable_dyn_sleep = value; return n; } static struct kobj_attribute sleep_while_idle_attr = __ATTR(sleep_while_idle, 0644, idle_show, idle_store); #endif static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL; /* * Let's power down on idle, but only if we are really * idle, because once we start down the path of * going idle we continue to do idle even if we get * a clock tick interrupt . . 
*/ void omap1_pm_idle(void) { extern __u32 arm_idlect1_mask; __u32 use_idlect1 = arm_idlect1_mask; int do_sleep = 0; local_fiq_disable(); #if defined(CONFIG_OMAP_MPU_TIMER) && !defined(CONFIG_OMAP_DM_TIMER) #warning Enable 32kHz OS timer in order to allow sleep states in idle use_idlect1 = use_idlect1 & ~(1 << 9); #else while (enable_dyn_sleep) { #ifdef CONFIG_CBUS_TAHVO_USB extern int vbus_active; /* Clock requirements? */ if (vbus_active) break; #endif do_sleep = 1; break; } #endif #ifdef CONFIG_OMAP_DM_TIMER use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1); #endif if (omap_dma_running()) use_idlect1 &= ~(1 << 6); /* We should be able to remove the do_sleep variable and multiple * tests above as soon as drivers, timer and DMA code have been fixed. * Even the sleep block count should become obsolete. */ if ((use_idlect1 != ~0) || !do_sleep) { __u32 saved_idlect1 = omap_readl(ARM_IDLECT1); if (cpu_is_omap15xx()) use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST; else use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL; omap_writel(use_idlect1, ARM_IDLECT1); __asm__ volatile ("mcr p15, 0, r0, c7, c0, 4"); omap_writel(saved_idlect1, ARM_IDLECT1); local_fiq_enable(); return; } omap_sram_suspend(omap_readl(ARM_IDLECT1), omap_readl(ARM_IDLECT2)); local_fiq_enable(); } /* * Configuration of the wakeup event is board specific. For the * moment we put it into this helper function. Later it may move * to board specific files. */ static void omap_pm_wakeup_setup(void) { u32 level1_wake = 0; u32 level2_wake = OMAP_IRQ_BIT(INT_UART2); /* * Turn off all interrupts except GPIO bank 1, L1-2nd level cascade, * and the L2 wakeup interrupts: keypad and UART2. Note that the * drivers must still separately call omap_set_gpio_wakeup() to * wake up to a GPIO interrupt. 
*/ if (cpu_is_omap7xx()) level1_wake = OMAP_IRQ_BIT(INT_7XX_GPIO_BANK1) | OMAP_IRQ_BIT(INT_7XX_IH2_IRQ); else if (cpu_is_omap15xx()) level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) | OMAP_IRQ_BIT(INT_1510_IH2_IRQ); else if (cpu_is_omap16xx()) level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) | OMAP_IRQ_BIT(INT_1610_IH2_IRQ); omap_writel(~level1_wake, OMAP_IH1_MIR); if (cpu_is_omap7xx()) { omap_writel(~level2_wake, OMAP_IH2_0_MIR); omap_writel(~(OMAP_IRQ_BIT(INT_7XX_WAKE_UP_REQ) | OMAP_IRQ_BIT(INT_7XX_MPUIO_KEYPAD)), OMAP_IH2_1_MIR); } else if (cpu_is_omap15xx()) { level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD); omap_writel(~level2_wake, OMAP_IH2_MIR); } else if (cpu_is_omap16xx()) { level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD); omap_writel(~level2_wake, OMAP_IH2_0_MIR); /* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */ omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ), OMAP_IH2_1_MIR); omap_writel(~0x0, OMAP_IH2_2_MIR); omap_writel(~0x0, OMAP_IH2_3_MIR); } /* New IRQ agreement, recalculate in cascade order */ omap_writel(1, OMAP_IH2_CONTROL); omap_writel(1, OMAP_IH1_CONTROL); } #define EN_DSPCK 13 /* ARM_CKCTL */ #define EN_APICK 6 /* ARM_IDLECT2 */ #define DSP_EN 1 /* ARM_RSTCT1 */ void omap1_pm_suspend(void) { unsigned long arg0 = 0, arg1 = 0; printk(KERN_INFO "PM: OMAP%x is trying to enter deep sleep...\n", omap_rev()); omap_serial_wake_trigger(1); if (!cpu_is_omap15xx()) omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG); /* * Step 1: turn off interrupts (FIXME: NOTE: already disabled) */ local_irq_disable(); local_fiq_disable(); /* * Step 2: save registers * * The omap is a strange/beautiful device. The caches, memory * and register state are preserved across power saves. * We have to save and restore very little register state to * idle the omap. * * Save interrupt, MPUI, ARM and UPLD control registers. 
*/ if (cpu_is_omap7xx()) { MPUI7XX_SAVE(OMAP_IH1_MIR); MPUI7XX_SAVE(OMAP_IH2_0_MIR); MPUI7XX_SAVE(OMAP_IH2_1_MIR); MPUI7XX_SAVE(MPUI_CTRL); MPUI7XX_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI7XX_SAVE(MPUI_DSP_API_CONFIG); MPUI7XX_SAVE(EMIFS_CONFIG); MPUI7XX_SAVE(EMIFF_SDRAM_CONFIG); } else if (cpu_is_omap15xx()) { MPUI1510_SAVE(OMAP_IH1_MIR); MPUI1510_SAVE(OMAP_IH2_MIR); MPUI1510_SAVE(MPUI_CTRL); MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1510_SAVE(MPUI_DSP_API_CONFIG); MPUI1510_SAVE(EMIFS_CONFIG); MPUI1510_SAVE(EMIFF_SDRAM_CONFIG); } else if (cpu_is_omap16xx()) { MPUI1610_SAVE(OMAP_IH1_MIR); MPUI1610_SAVE(OMAP_IH2_0_MIR); MPUI1610_SAVE(OMAP_IH2_1_MIR); MPUI1610_SAVE(OMAP_IH2_2_MIR); MPUI1610_SAVE(OMAP_IH2_3_MIR); MPUI1610_SAVE(MPUI_CTRL); MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1610_SAVE(MPUI_DSP_API_CONFIG); MPUI1610_SAVE(EMIFS_CONFIG); MPUI1610_SAVE(EMIFF_SDRAM_CONFIG); } ARM_SAVE(ARM_CKCTL); ARM_SAVE(ARM_IDLECT1); ARM_SAVE(ARM_IDLECT2); if (!(cpu_is_omap15xx())) ARM_SAVE(ARM_IDLECT3); ARM_SAVE(ARM_EWUPCT); ARM_SAVE(ARM_RSTCT1); ARM_SAVE(ARM_RSTCT2); ARM_SAVE(ARM_SYSST); ULPD_SAVE(ULPD_CLOCK_CTRL); ULPD_SAVE(ULPD_STATUS_REQ); /* (Step 3 removed - we now allow deep sleep by default) */ /* * Step 4: OMAP DSP Shutdown */ /* stop DSP */ omap_writew(omap_readw(ARM_RSTCT1) & ~(1 << DSP_EN), ARM_RSTCT1); /* shut down dsp_ck */ if (!cpu_is_omap7xx()) omap_writew(omap_readw(ARM_CKCTL) & ~(1 << EN_DSPCK), ARM_CKCTL); /* temporarily enabling api_ck to access DSP registers */ omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2); /* save DSP registers */ DSP_SAVE(DSP_IDLECT2); /* Stop all DSP domain clocks */ __raw_writew(0, DSP_IDLECT2); /* * Step 5: Wakeup Event Setup */ omap_pm_wakeup_setup(); /* * Step 6: ARM and Traffic controller shutdown */ /* disable ARM watchdog */ omap_writel(0x00F5, OMAP_WDT_TIMER_MODE); omap_writel(0x00A0, OMAP_WDT_TIMER_MODE); /* * Step 6b: ARM and Traffic controller shutdown * * Step 6 continues here. 
Prepare jump to power management * assembly code in internal SRAM. * * Since the omap_cpu_suspend routine has been copied to * SRAM, we'll do an indirect procedure call to it and pass the * contents of arm_idlect1 and arm_idlect2 so it can restore * them when it wakes up and it will return. */ arg0 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT1]; arg1 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT2]; /* * Step 6c: ARM and Traffic controller shutdown * * Jump to assembly code. The processor will stay there * until wake up. */ omap_sram_suspend(arg0, arg1); /* * If we are here, processor is woken up! */ /* * Restore DSP clocks */ /* again temporarily enabling api_ck to access DSP registers */ omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2); /* Restore DSP domain clocks */ DSP_RESTORE(DSP_IDLECT2); /* * Restore ARM state, except ARM_IDLECT1/2 which omap_cpu_suspend did */ if (!(cpu_is_omap15xx())) ARM_RESTORE(ARM_IDLECT3); ARM_RESTORE(ARM_CKCTL); ARM_RESTORE(ARM_EWUPCT); ARM_RESTORE(ARM_RSTCT1); ARM_RESTORE(ARM_RSTCT2); ARM_RESTORE(ARM_SYSST); ULPD_RESTORE(ULPD_CLOCK_CTRL); ULPD_RESTORE(ULPD_STATUS_REQ); if (cpu_is_omap7xx()) { MPUI7XX_RESTORE(EMIFS_CONFIG); MPUI7XX_RESTORE(EMIFF_SDRAM_CONFIG); MPUI7XX_RESTORE(OMAP_IH1_MIR); MPUI7XX_RESTORE(OMAP_IH2_0_MIR); MPUI7XX_RESTORE(OMAP_IH2_1_MIR); } else if (cpu_is_omap15xx()) { MPUI1510_RESTORE(MPUI_CTRL); MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG); MPUI1510_RESTORE(MPUI_DSP_API_CONFIG); MPUI1510_RESTORE(EMIFS_CONFIG); MPUI1510_RESTORE(EMIFF_SDRAM_CONFIG); MPUI1510_RESTORE(OMAP_IH1_MIR); MPUI1510_RESTORE(OMAP_IH2_MIR); } else if (cpu_is_omap16xx()) { MPUI1610_RESTORE(MPUI_CTRL); MPUI1610_RESTORE(MPUI_DSP_BOOT_CONFIG); MPUI1610_RESTORE(MPUI_DSP_API_CONFIG); MPUI1610_RESTORE(EMIFS_CONFIG); MPUI1610_RESTORE(EMIFF_SDRAM_CONFIG); MPUI1610_RESTORE(OMAP_IH1_MIR); MPUI1610_RESTORE(OMAP_IH2_0_MIR); MPUI1610_RESTORE(OMAP_IH2_1_MIR); MPUI1610_RESTORE(OMAP_IH2_2_MIR); MPUI1610_RESTORE(OMAP_IH2_3_MIR); } if (!cpu_is_omap15xx()) 
omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG); /* * Re-enable interrupts */ local_irq_enable(); local_fiq_enable(); omap_serial_wake_trigger(0); printk(KERN_INFO "PM: OMAP%x is re-starting from deep sleep...\n", omap_rev()); } #ifdef CONFIG_DEBUG_FS /* * Read system PM registers for debugging */ static int omap_pm_debug_show(struct seq_file *m, void *v) { ARM_SAVE(ARM_CKCTL); ARM_SAVE(ARM_IDLECT1); ARM_SAVE(ARM_IDLECT2); if (!(cpu_is_omap15xx())) ARM_SAVE(ARM_IDLECT3); ARM_SAVE(ARM_EWUPCT); ARM_SAVE(ARM_RSTCT1); ARM_SAVE(ARM_RSTCT2); ARM_SAVE(ARM_SYSST); ULPD_SAVE(ULPD_IT_STATUS); ULPD_SAVE(ULPD_CLOCK_CTRL); ULPD_SAVE(ULPD_SOFT_REQ); ULPD_SAVE(ULPD_STATUS_REQ); ULPD_SAVE(ULPD_DPLL_CTRL); ULPD_SAVE(ULPD_POWER_CTRL); if (cpu_is_omap7xx()) { MPUI7XX_SAVE(MPUI_CTRL); MPUI7XX_SAVE(MPUI_DSP_STATUS); MPUI7XX_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI7XX_SAVE(MPUI_DSP_API_CONFIG); MPUI7XX_SAVE(EMIFF_SDRAM_CONFIG); MPUI7XX_SAVE(EMIFS_CONFIG); } else if (cpu_is_omap15xx()) { MPUI1510_SAVE(MPUI_CTRL); MPUI1510_SAVE(MPUI_DSP_STATUS); MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1510_SAVE(MPUI_DSP_API_CONFIG); MPUI1510_SAVE(EMIFF_SDRAM_CONFIG); MPUI1510_SAVE(EMIFS_CONFIG); } else if (cpu_is_omap16xx()) { MPUI1610_SAVE(MPUI_CTRL); MPUI1610_SAVE(MPUI_DSP_STATUS); MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1610_SAVE(MPUI_DSP_API_CONFIG); MPUI1610_SAVE(EMIFF_SDRAM_CONFIG); MPUI1610_SAVE(EMIFS_CONFIG); } seq_printf(m, "ARM_CKCTL_REG: 0x%-8x \n" "ARM_IDLECT1_REG: 0x%-8x \n" "ARM_IDLECT2_REG: 0x%-8x \n" "ARM_IDLECT3_REG: 0x%-8x \n" "ARM_EWUPCT_REG: 0x%-8x \n" "ARM_RSTCT1_REG: 0x%-8x \n" "ARM_RSTCT2_REG: 0x%-8x \n" "ARM_SYSST_REG: 0x%-8x \n" "ULPD_IT_STATUS_REG: 0x%-4x \n" "ULPD_CLOCK_CTRL_REG: 0x%-4x \n" "ULPD_SOFT_REQ_REG: 0x%-4x \n" "ULPD_DPLL_CTRL_REG: 0x%-4x \n" "ULPD_STATUS_REQ_REG: 0x%-4x \n" "ULPD_POWER_CTRL_REG: 0x%-4x \n", ARM_SHOW(ARM_CKCTL), ARM_SHOW(ARM_IDLECT1), ARM_SHOW(ARM_IDLECT2), ARM_SHOW(ARM_IDLECT3), ARM_SHOW(ARM_EWUPCT), ARM_SHOW(ARM_RSTCT1), ARM_SHOW(ARM_RSTCT2), 
ARM_SHOW(ARM_SYSST), ULPD_SHOW(ULPD_IT_STATUS), ULPD_SHOW(ULPD_CLOCK_CTRL), ULPD_SHOW(ULPD_SOFT_REQ), ULPD_SHOW(ULPD_DPLL_CTRL), ULPD_SHOW(ULPD_STATUS_REQ), ULPD_SHOW(ULPD_POWER_CTRL)); if (cpu_is_omap7xx()) { seq_printf(m, "MPUI7XX_CTRL_REG 0x%-8x \n" "MPUI7XX_DSP_STATUS_REG: 0x%-8x \n" "MPUI7XX_DSP_BOOT_CONFIG_REG: 0x%-8x \n" "MPUI7XX_DSP_API_CONFIG_REG: 0x%-8x \n" "MPUI7XX_SDRAM_CONFIG_REG: 0x%-8x \n" "MPUI7XX_EMIFS_CONFIG_REG: 0x%-8x \n", MPUI7XX_SHOW(MPUI_CTRL), MPUI7XX_SHOW(MPUI_DSP_STATUS), MPUI7XX_SHOW(MPUI_DSP_BOOT_CONFIG), MPUI7XX_SHOW(MPUI_DSP_API_CONFIG), MPUI7XX_SHOW(EMIFF_SDRAM_CONFIG), MPUI7XX_SHOW(EMIFS_CONFIG)); } else if (cpu_is_omap15xx()) { seq_printf(m, "MPUI1510_CTRL_REG 0x%-8x \n" "MPUI1510_DSP_STATUS_REG: 0x%-8x \n" "MPUI1510_DSP_BOOT_CONFIG_REG: 0x%-8x \n" "MPUI1510_DSP_API_CONFIG_REG: 0x%-8x \n" "MPUI1510_SDRAM_CONFIG_REG: 0x%-8x \n" "MPUI1510_EMIFS_CONFIG_REG: 0x%-8x \n", MPUI1510_SHOW(MPUI_CTRL), MPUI1510_SHOW(MPUI_DSP_STATUS), MPUI1510_SHOW(MPUI_DSP_BOOT_CONFIG), MPUI1510_SHOW(MPUI_DSP_API_CONFIG), MPUI1510_SHOW(EMIFF_SDRAM_CONFIG), MPUI1510_SHOW(EMIFS_CONFIG)); } else if (cpu_is_omap16xx()) { seq_printf(m, "MPUI1610_CTRL_REG 0x%-8x \n" "MPUI1610_DSP_STATUS_REG: 0x%-8x \n" "MPUI1610_DSP_BOOT_CONFIG_REG: 0x%-8x \n" "MPUI1610_DSP_API_CONFIG_REG: 0x%-8x \n" "MPUI1610_SDRAM_CONFIG_REG: 0x%-8x \n" "MPUI1610_EMIFS_CONFIG_REG: 0x%-8x \n", MPUI1610_SHOW(MPUI_CTRL), MPUI1610_SHOW(MPUI_DSP_STATUS), MPUI1610_SHOW(MPUI_DSP_BOOT_CONFIG), MPUI1610_SHOW(MPUI_DSP_API_CONFIG), MPUI1610_SHOW(EMIFF_SDRAM_CONFIG), MPUI1610_SHOW(EMIFS_CONFIG)); } return 0; } static int omap_pm_debug_open(struct inode *inode, struct file *file) { return single_open(file, omap_pm_debug_show, &inode->i_private); } static const struct file_operations omap_pm_debug_fops = { .open = omap_pm_debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void omap_pm_init_debugfs(void) { struct dentry *d; d = debugfs_create_dir("pm_debug", NULL); if (!d) 
return; (void) debugfs_create_file("omap_pm", S_IWUSR | S_IRUGO, d, NULL, &omap_pm_debug_fops); } #endif /* CONFIG_DEBUG_FS */ /* * omap_pm_prepare - Do preliminary suspend work. * */ static int omap_pm_prepare(void) { /* We cannot sleep in idle until we have resumed */ cpu_idle_poll_ctrl(true); return 0; } /* * omap_pm_enter - Actually enter a sleep state. * @state: State we're entering. * */ static int omap_pm_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: omap1_pm_suspend(); break; default: return -EINVAL; } return 0; } /** * omap_pm_finish - Finish up suspend sequence. * * This is called after we wake back up (or if entering the sleep state * failed). */ static void omap_pm_finish(void) { cpu_idle_poll_ctrl(false); } static irqreturn_t omap_wakeup_interrupt(int irq, void *dev) { return IRQ_HANDLED; } static struct irqaction omap_wakeup_irq = { .name = "peripheral wakeup", .flags = IRQF_DISABLED, .handler = omap_wakeup_interrupt }; static const struct platform_suspend_ops omap_pm_ops = { .prepare = omap_pm_prepare, .enter = omap_pm_enter, .finish = omap_pm_finish, .valid = suspend_valid_only_mem, }; static int __init omap_pm_init(void) { #ifdef CONFIG_OMAP_32K_TIMER int error; #endif if (!cpu_class_is_omap1()) return -ENODEV; printk("Power Management for TI OMAP.\n"); /* * We copy the assembler sleep/wakeup routines to SRAM. * These routines need to be in SRAM as that's the only * memory the MPU can see when it wakes up. 
*/ if (cpu_is_omap7xx()) { omap_sram_suspend = omap_sram_push(omap7xx_cpu_suspend, omap7xx_cpu_suspend_sz); } else if (cpu_is_omap15xx()) { omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend, omap1510_cpu_suspend_sz); } else if (cpu_is_omap16xx()) { omap_sram_suspend = omap_sram_push(omap1610_cpu_suspend, omap1610_cpu_suspend_sz); } if (omap_sram_suspend == NULL) { printk(KERN_ERR "PM not initialized: Missing SRAM support\n"); return -ENODEV; } arm_pm_idle = omap1_pm_idle; if (cpu_is_omap7xx()) setup_irq(INT_7XX_WAKE_UP_REQ, &omap_wakeup_irq); else if (cpu_is_omap16xx()) setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq); /* Program new power ramp-up time * (0 for most boards since we don't lower voltage when in deep sleep) */ omap_writew(ULPD_SETUP_ANALOG_CELL_3_VAL, ULPD_SETUP_ANALOG_CELL_3); /* Setup ULPD POWER_CTRL_REG - enter deep sleep whenever possible */ omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL); /* Configure IDLECT3 */ if (cpu_is_omap7xx()) omap_writel(OMAP7XX_IDLECT3_VAL, OMAP7XX_IDLECT3); else if (cpu_is_omap16xx()) omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3); suspend_set_ops(&omap_pm_ops); #ifdef CONFIG_DEBUG_FS omap_pm_init_debugfs(); #endif #ifdef CONFIG_OMAP_32K_TIMER error = sysfs_create_file(power_kobj, &sleep_while_idle_attr.attr); if (error) printk(KERN_ERR "sysfs_create_file failed: %d\n", error); #endif if (cpu_is_omap16xx()) { /* configure LOW_PWR pin */ omap_cfg_reg(T20_1610_LOW_PWR); } return 0; } __initcall(omap_pm_init);
gpl-2.0
UniqueDroid/lge-kernel-x3-p880
arch/arm/mach-ux500/devices-common.c
2711
2976
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL), version 2. */ #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/amba/bus.h> #include <plat/gpio.h> #include <mach/hardware.h> #include "devices-common.h" struct amba_device * dbx500_add_amba_device(const char *name, resource_size_t base, int irq, void *pdata, unsigned int periphid) { struct amba_device *dev; int ret; dev = kzalloc(sizeof *dev, GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); dev->dev.init_name = name; dev->res.start = base; dev->res.end = base + SZ_4K - 1; dev->res.flags = IORESOURCE_MEM; dev->dma_mask = DMA_BIT_MASK(32); dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); dev->irq[0] = irq; dev->irq[1] = NO_IRQ; dev->periphid = periphid; dev->dev.platform_data = pdata; ret = amba_device_register(dev, &iomem_resource); if (ret) { kfree(dev); return ERR_PTR(ret); } return dev; } static struct platform_device * dbx500_add_platform_device(const char *name, int id, void *pdata, struct resource *res, int resnum) { struct platform_device *dev; int ret; dev = platform_device_alloc(name, id); if (!dev) return ERR_PTR(-ENOMEM); dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); dev->dev.dma_mask = &dev->dev.coherent_dma_mask; ret = platform_device_add_resources(dev, res, resnum); if (ret) goto out_free; dev->dev.platform_data = pdata; ret = platform_device_add(dev); if (ret) goto out_free; return dev; out_free: platform_device_put(dev); return ERR_PTR(ret); } struct platform_device * dbx500_add_platform_device_4k1irq(const char *name, int id, resource_size_t base, int irq, void *pdata) { struct resource resources[] = { [0] = { .start = base, .end = base + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = irq, .end = irq, .flags = IORESOURCE_IRQ, } }; return 
dbx500_add_platform_device(name, id, pdata, resources, ARRAY_SIZE(resources)); } static struct platform_device * dbx500_add_gpio(int id, resource_size_t addr, int irq, struct nmk_gpio_platform_data *pdata) { struct resource resources[] = { { .start = addr, .end = addr + 127, .flags = IORESOURCE_MEM, }, { .start = irq, .end = irq, .flags = IORESOURCE_IRQ, } }; return platform_device_register_resndata(NULL, "gpio", id, resources, ARRAY_SIZE(resources), pdata, sizeof(*pdata)); } void dbx500_add_gpios(resource_size_t *base, int num, int irq, struct nmk_gpio_platform_data *pdata) { int first = 0; int i; for (i = 0; i < num; i++, first += 32, irq++) { pdata->first_gpio = first; pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first); pdata->num_gpio = 32; dbx500_add_gpio(i, base[i], irq, pdata); } }
gpl-2.0
Dm47021/LGE_Kernel_F6mt
drivers/media/video/v4l2-subdev.c
4247
8701
/* * V4L2 sub-device * * Copyright (C) 2010 Nokia Corporation * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/videodev2.h> #include <linux/export.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd) { #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) /* Allocate try format and crop in the same memory block */ fh->try_fmt = kzalloc((sizeof(*fh->try_fmt) + sizeof(*fh->try_crop)) * sd->entity.num_pads, GFP_KERNEL); if (fh->try_fmt == NULL) return -ENOMEM; fh->try_crop = (struct v4l2_rect *) (fh->try_fmt + sd->entity.num_pads); #endif return 0; } static void subdev_fh_free(struct v4l2_subdev_fh *fh) { #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) kfree(fh->try_fmt); fh->try_fmt = NULL; fh->try_crop = NULL; #endif } static int subdev_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_subdev_fh *subdev_fh; #if defined(CONFIG_MEDIA_CONTROLLER) struct media_entity *entity = NULL; #endif int ret; subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL); if 
(subdev_fh == NULL) return -ENOMEM; ret = subdev_fh_init(subdev_fh, sd); if (ret) { kfree(subdev_fh); return ret; } v4l2_fh_init(&subdev_fh->vfh, vdev); v4l2_fh_add(&subdev_fh->vfh); file->private_data = &subdev_fh->vfh; #if defined(CONFIG_MEDIA_CONTROLLER) if (sd->v4l2_dev->mdev) { entity = media_entity_get(&sd->entity); if (!entity) { ret = -EBUSY; goto err; } } #endif if (sd->internal_ops && sd->internal_ops->open) { ret = sd->internal_ops->open(sd, subdev_fh); if (ret < 0) goto err; } return 0; err: #if defined(CONFIG_MEDIA_CONTROLLER) if (entity) media_entity_put(entity); #endif v4l2_fh_del(&subdev_fh->vfh); v4l2_fh_exit(&subdev_fh->vfh); subdev_fh_free(subdev_fh); kfree(subdev_fh); return ret; } static int subdev_close(struct file *file) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *vfh = file->private_data; struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); if (sd->internal_ops && sd->internal_ops->close) sd->internal_ops->close(sd, subdev_fh); #if defined(CONFIG_MEDIA_CONTROLLER) if (sd->v4l2_dev->mdev) media_entity_put(&sd->entity); #endif v4l2_fh_del(vfh); v4l2_fh_exit(vfh); subdev_fh_free(subdev_fh); kfree(subdev_fh); file->private_data = NULL; return 0; } static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *vfh = file->private_data; #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); #endif switch (cmd) { case VIDIOC_QUERYCTRL: return v4l2_queryctrl(vfh->ctrl_handler, arg); case VIDIOC_QUERYMENU: return v4l2_querymenu(vfh->ctrl_handler, arg); case VIDIOC_G_CTRL: return v4l2_g_ctrl(vfh->ctrl_handler, arg); case VIDIOC_S_CTRL: return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg); case VIDIOC_G_EXT_CTRLS: return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg); case VIDIOC_S_EXT_CTRLS: return 
v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg); case VIDIOC_TRY_EXT_CTRLS: return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) return -ENOIOCTLCMD; return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); case VIDIOC_SUBSCRIBE_EVENT: return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg); case VIDIOC_UNSUBSCRIBE_EVENT: return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg); #ifdef CONFIG_VIDEO_ADV_DEBUG case VIDIOC_DBG_G_REGISTER: { struct v4l2_dbg_register *p = arg; if (!capable(CAP_SYS_ADMIN)) return -EPERM; return v4l2_subdev_call(sd, core, g_register, p); } case VIDIOC_DBG_S_REGISTER: { struct v4l2_dbg_register *p = arg; if (!capable(CAP_SYS_ADMIN)) return -EPERM; return v4l2_subdev_call(sd, core, s_register, p); } #endif case VIDIOC_LOG_STATUS: { int ret; pr_info("%s: ================= START STATUS =================\n", sd->name); ret = v4l2_subdev_call(sd, core, log_status); pr_info("%s: ================== END STATUS ==================\n", sd->name); return ret; } #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) case VIDIOC_SUBDEV_G_FMT: { struct v4l2_subdev_format *format = arg; if (format->which != V4L2_SUBDEV_FORMAT_TRY && format->which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; if (format->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh, format); } case VIDIOC_SUBDEV_S_FMT: { struct v4l2_subdev_format *format = arg; if (format->which != V4L2_SUBDEV_FORMAT_TRY && format->which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; if (format->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh, format); } case VIDIOC_SUBDEV_G_CROP: { struct v4l2_subdev_crop *crop = arg; if (crop->which != V4L2_SUBDEV_FORMAT_TRY && crop->which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; if (crop->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop); } case 
VIDIOC_SUBDEV_S_CROP: { struct v4l2_subdev_crop *crop = arg; if (crop->which != V4L2_SUBDEV_FORMAT_TRY && crop->which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; if (crop->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop); } case VIDIOC_SUBDEV_ENUM_MBUS_CODE: { struct v4l2_subdev_mbus_code_enum *code = arg; if (code->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh, code); } case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: { struct v4l2_subdev_frame_size_enum *fse = arg; if (fse->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh, fse); } case VIDIOC_SUBDEV_G_FRAME_INTERVAL: return v4l2_subdev_call(sd, video, g_frame_interval, arg); case VIDIOC_SUBDEV_S_FRAME_INTERVAL: return v4l2_subdev_call(sd, video, s_frame_interval, arg); case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: { struct v4l2_subdev_frame_interval_enum *fie = arg; if (fie->pad >= sd->entity.num_pads) return -EINVAL; return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh, fie); } #endif default: return v4l2_subdev_call(sd, core, ioctl, cmd, arg); } return 0; } static long subdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, subdev_do_ioctl); } static unsigned int subdev_poll(struct file *file, poll_table *wait) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *fh = file->private_data; if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) return POLLERR; poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) return POLLPRI; return 0; } const struct v4l2_file_operations v4l2_subdev_fops = { .owner = THIS_MODULE, .open = subdev_open, .unlocked_ioctl = subdev_ioctl, .release = subdev_close, .poll = subdev_poll, }; void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) { INIT_LIST_HEAD(&sd->list); BUG_ON(!ops); 
sd->ops = ops; sd->v4l2_dev = NULL; sd->flags = 0; sd->name[0] = '\0'; sd->grp_id = 0; sd->dev_priv = NULL; sd->host_priv = NULL; #if defined(CONFIG_MEDIA_CONTROLLER) sd->entity.name = sd->name; sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV; #endif } EXPORT_SYMBOL(v4l2_subdev_init);
gpl-2.0
sultanqasim/android_kernel_motorola_otus
drivers/mfd/stmpe-spi.c
5015
3311
/* * ST Microelectronics MFD: stmpe's spi client specific driver * * Copyright (C) ST Microelectronics SA 2011 * * License Terms: GNU General Public License, version 2 * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics */ #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include "stmpe.h" #define READ_CMD (1 << 7) static int spi_reg_read(struct stmpe *stmpe, u8 reg) { struct spi_device *spi = stmpe->client; int status = spi_w8r16(spi, reg | READ_CMD); return (status < 0) ? status : status >> 8; } static int spi_reg_write(struct stmpe *stmpe, u8 reg, u8 val) { struct spi_device *spi = stmpe->client; u16 cmd = (val << 8) | reg; return spi_write(spi, (const u8 *)&cmd, 2); } static int spi_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values) { int ret, i; for (i = 0; i < length; i++) { ret = spi_reg_read(stmpe, reg + i); if (ret < 0) return ret; *(values + i) = ret; } return 0; } static int spi_block_write(struct stmpe *stmpe, u8 reg, u8 length, const u8 *values) { int ret = 0, i; for (i = length; i > 0; i--, reg++) { ret = spi_reg_write(stmpe, reg, *(values + i - 1)); if (ret < 0) return ret; } return ret; } static void spi_init(struct stmpe *stmpe) { struct spi_device *spi = stmpe->client; spi->bits_per_word = 8; /* This register is only present for stmpe811 */ if (stmpe->variant->id_val == 0x0811) spi_reg_write(stmpe, STMPE811_REG_SPI_CFG, spi->mode); if (spi_setup(spi) < 0) dev_dbg(&spi->dev, "spi_setup failed\n"); } static struct stmpe_client_info spi_ci = { .read_byte = spi_reg_read, .write_byte = spi_reg_write, .read_block = spi_block_read, .write_block = spi_block_write, .init = spi_init, }; static int __devinit stmpe_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); /* don't exceed max specified rate - 1MHz - Limitation of STMPE */ if (spi->max_speed_hz > 1000000) { dev_dbg(&spi->dev, "f(sample) %d KHz?\n", 
(spi->max_speed_hz/1000)); return -EINVAL; } spi_ci.irq = spi->irq; spi_ci.client = spi; spi_ci.dev = &spi->dev; return stmpe_probe(&spi_ci, id->driver_data); } static int __devexit stmpe_spi_remove(struct spi_device *spi) { struct stmpe *stmpe = dev_get_drvdata(&spi->dev); return stmpe_remove(stmpe); } static const struct spi_device_id stmpe_spi_id[] = { { "stmpe610", STMPE610 }, { "stmpe801", STMPE801 }, { "stmpe811", STMPE811 }, { "stmpe1601", STMPE1601 }, { "stmpe2401", STMPE2401 }, { "stmpe2403", STMPE2403 }, { } }; MODULE_DEVICE_TABLE(spi, stmpe_id); static struct spi_driver stmpe_spi_driver = { .driver = { .name = "stmpe-spi", .bus = &spi_bus_type, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &stmpe_dev_pm_ops, #endif }, .probe = stmpe_spi_probe, .remove = __devexit_p(stmpe_spi_remove), .id_table = stmpe_spi_id, }; static int __init stmpe_init(void) { return spi_register_driver(&stmpe_spi_driver); } subsys_initcall(stmpe_init); static void __exit stmpe_exit(void) { spi_unregister_driver(&stmpe_spi_driver); } module_exit(stmpe_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
gpl-2.0
LinuxEmbed/LinuxEmbed-RPI-Linux
arch/alpha/kernel/time.c
7319
14477
/* * linux/arch/alpha/kernel/time.c * * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds * * This file contains the PC-specific time handling details: * reading the RTC at bootup, etc.. * 1994-07-02 Alan Modra * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime * 1995-03-26 Markus Kuhn * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 * precision CMOS clock update * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * 1997-01-09 Adrian Sun * use interval timer if CONFIG_RTC=y * 1997-10-29 John Bowman (bowman@math.ualberta.ca) * fixed tick loss calculation in timer_interrupt * (round system clock to nearest tick instead of truncating) * fixed algorithm in time_init for getting time from CMOS clock * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) * fixed algorithm in do_gettimeofday() for calculating the precise time * from processor cycle counter (now taking lost_ticks into account) * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de> * Fixed time_init to be aware of epoches != 1900. This prevents * booting up in 2048 for me;) Code is stolen from rtc.c. * 2003-06-03 R. 
Scott Bailey <scott.bailey@eds.com> * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM */ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/bcd.h> #include <linux/profile.h> #include <linux/irq_work.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/hwrpb.h> #include <asm/rtc.h> #include <linux/mc146818rtc.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/clocksource.h> #include "proto.h" #include "irq_impl.h" static int set_rtc_mmss(unsigned long); DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); #define TICK_SIZE (tick_nsec / 1000) /* * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting * by 48 gives us 16 bits for HZ while keeping the accuracy good even * for large CPU clock rates. 
 */
#define FIX_SHIFT	48

/* lump static variables together for more efficient access: */
static struct {
	/* cycle counter last time it got invoked */
	__u32 last_time;
	/* ticks/cycle * 2^48 */
	unsigned long scaled_ticks_per_cycle;
	/* partial unused tick */
	unsigned long partial_tick;
} state;

/* CPU frequency estimated at boot; 0 means "trust the HWRPB value". */
unsigned long est_cycle_freq;

#ifdef CONFIG_IRQ_WORK

/* Per-CPU "irq_work was raised" flag, polled from timer_interrupt(). */
DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0

void arch_irq_work_raise(void)
{
	set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()      0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */


/* Read the 32-bit process cycle counter via the "rpcc" instruction. */
static inline __u32 rpcc(void)
{
	__u32 result;
	asm volatile ("rpcc %0" : "=r"(result));
	return result;
}

/* Push the current wall-clock seconds back into the CMOS RTC. */
int update_persistent_clock(struct timespec now)
{
	return set_rtc_mmss(now.tv_sec);
}

/*
 * Read boot time from the CMOS RTC.  The two-digit RTC year is mapped
 * onto one of several historical epochs (PC, NT, Digital UNIX) before
 * being converted to seconds since 1970.
 */
void
read_persistent_clock(struct timespec *ts)
{
	unsigned int year, mon, day, hour, min, sec, epoch;

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	/* Registers are BCD-encoded unless the RTC is in binary mode. */
	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
		sec = bcd2bin(sec);
		min = bcd2bin(min);
		hour = bcd2bin(hour);
		day = bcd2bin(day);
		mon = bcd2bin(mon);
		year = bcd2bin(year);
	}

	/* PC-like is standard; used for year >= 70 */
	epoch = 1900;
	if (year < 20)
		epoch = 2000;
	else if (year >= 20 && year < 48)
		/* NT epoch */
		epoch = 1980;
	else if (year >= 48 && year < 70)
		/* Digital UNIX epoch */
		epoch = 1952;

	printk(KERN_INFO "Using epoch = %d\n", epoch);

	if ((year += epoch) < 1970)
		year += 100;

	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
	ts->tv_nsec = 0;
}


/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
irqreturn_t
timer_interrupt(int irq, void *dev)
{
	unsigned long delta;
	__u32 now;
	long nticks;

#ifndef CONFIG_SMP
	/* Not SMP, do kernel PC profiling here.  */
	profile_tick(CPU_PROFILING);
#endif

	/*
	 * Calculate how many ticks have passed since the last update,
	 * including any previous partial leftover.  Save any resulting
	 * fraction for the next pass.
	 */
	now = rpcc();
	delta = now - state.last_time;
	state.last_time = now;
	/* Convert cycles to ticks in 16.48 fixed point (see FIX_SHIFT). */
	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
	nticks = delta >> FIX_SHIFT;

	if (nticks)
		xtime_update(nticks);

	/* Run deferred irq_work raised since the last tick. */
	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

#ifndef CONFIG_SMP
	while (nticks--)
		update_process_times(user_mode(get_irq_regs()));
#endif

	return IRQ_HANDLED;
}

/*
 * Put the MC146818-compatible RTC into a known state: 1024 Hz periodic
 * interrupt rate, periodic interrupts enabled, alarm/update interrupts
 * off, and the i8254 PIT channels programmed.
 */
void __init
common_init_rtc(void)
{
	unsigned char x;

	/* Reset periodic interrupt frequency.  */
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
		CMOS_WRITE(0x26, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts.  */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	/* Read clears any pending RTC interrupt condition. */
	(void) CMOS_READ(RTC_INTR_FLAGS);

	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq();
}

/* Thin wrapper over the generic MC146818 RTC read helper. */
unsigned int common_get_rtc_time(struct rtc_time *time)
{
	return __get_rtc_time(time);
}

/* Thin wrapper over the generic MC146818 RTC write helper. */
int common_set_rtc_time(struct rtc_time *time)
{
	return __set_rtc_time(time);
}

/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.
:-( Return 0 if the result cannot be trusted, otherwise return the argument. */ static unsigned long __init validate_cc_value(unsigned long cc) { static struct bounds { unsigned int min, max; } cpu_hz[] __initdata = { [EV3_CPU] = { 50000000, 200000000 }, /* guess */ [EV4_CPU] = { 100000000, 300000000 }, [LCA4_CPU] = { 100000000, 300000000 }, /* guess */ [EV45_CPU] = { 200000000, 300000000 }, [EV5_CPU] = { 250000000, 433000000 }, [EV56_CPU] = { 333000000, 667000000 }, [PCA56_CPU] = { 400000000, 600000000 }, /* guess */ [PCA57_CPU] = { 500000000, 600000000 }, /* guess */ [EV6_CPU] = { 466000000, 600000000 }, [EV67_CPU] = { 600000000, 750000000 }, [EV68AL_CPU] = { 750000000, 940000000 }, [EV68CB_CPU] = { 1000000000, 1333333333 }, /* None of the following are shipping as of 2001-11-01. */ [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */ [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */ [EV7_CPU] = { 800000000, 1400000000 }, /* guess */ [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */ }; /* Allow for some drift in the crystal. 10MHz is more than enough. */ const unsigned int deviation = 10000000; struct percpu_struct *cpu; unsigned int index; cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset); index = cpu->type & 0xffffffff; /* If index out of bounds, no way to validate. */ if (index >= ARRAY_SIZE(cpu_hz)) return cc; /* If index contains no data, no way to validate. */ if (cpu_hz[index].max == 0) return cc; if (cc < cpu_hz[index].min - deviation || cc > cpu_hz[index].max + deviation) return 0; return cc; } /* * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from * arch/i386/time.c. 
*/ #define CALIBRATE_LATCH 0xffff #define TIMEOUT_COUNT 0x100000 static unsigned long __init calibrate_cc_with_pit(void) { int cc, count = 0; /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Now let's take care of CTC channel 2 * * Set the Gate high, program CTC channel 2 for mode 0, * (interrupt on terminal count mode), binary count, * load 5 * LATCH count, (LSB and MSB) to begin countdown. */ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ cc = rpcc(); do { count++; } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT); cc = rpcc() - cc; /* Error: ECTCNEVERSET or ECPUTOOFAST. */ if (count <= 1 || count == TIMEOUT_COUNT) return 0; return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1); } /* The Linux interpretation of the CMOS clock register contents: When the Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the second which has precisely just started. Let's hope other operating systems interpret the RTC the same way. */ static unsigned long __init rpcc_after_update_in_progress(void) { do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)); do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); return rpcc(); } #ifndef CONFIG_SMP /* Until and unless we figure out how to get cpu cycle counters in sync and keep them there, we can't use the rpcc. 
 */
static cycle_t read_rpcc(struct clocksource *cs)
{
	cycle_t ret = (cycle_t)rpcc();
	return ret;
}

static struct clocksource clocksource_rpcc = {
	.name                   = "rpcc",
	.rating                 = 300,
	.read                   = read_rpcc,
	.mask                   = CLOCKSOURCE_MASK(32),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS
};

static inline void register_rpcc_clocksource(long cycle_freq)
{
	clocksource_register_hz(&clocksource_rpcc, cycle_freq);
}
#else /* !CONFIG_SMP */
/* SMP: per-CPU cycle counters are unsynchronized, so no clocksource. */
static inline void register_rpcc_clocksource(long cycle_freq)
{
}
#endif /* !CONFIG_SMP */

/*
 * Determine the CPU cycle frequency (preferring a measured value over a
 * bogus HWRPB one), register the rpcc clocksource, seed the tick
 * accounting state, and start the platform RTC.
 */
void __init
time_init(void)
{
	unsigned int cc1, cc2;
	unsigned long cycle_freq, tolerance;
	long diff;

	/* Calibrate CPU clock -- attempt #1.  */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2.  */
	if (!est_cycle_freq) {
		/* Count cycles across exactly one RTC second. */
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it.  Otherwise, use what we found.  */
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus.  "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (! validate_cc_value (cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
	   to settle, as the Update-In-Progress bit going low isn't good
	   enough on some hardware.  2ms is our guess; we haven't found
	   bogomips yet, but this is close on a 500Mhz box.
 */
	__delay(1000000);

	/* Compile-time sanity: scaled_ticks_per_cycle needs HZ to fit in
	   16 bits above FIX_SHIFT; link error otherwise. */
	if (HZ > (1<<16)) {
		extern void __you_loose (void);
		__you_loose();
	}

	register_rpcc_clocksource(cycle_freq);

	state.last_time = cc1;
	state.scaled_ticks_per_cycle
		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
	state.partial_tick = 0L;

	/* Startup the timer source. */
	alpha_mv.init_rtc();
}

/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
 * called 500 ms after the second nowtime has started, because when
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later. Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 *      sets the minutes. Usually you won't notice until after reboot!
 */
static int
set_rtc_mmss(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char save_control, save_freq_select;

	/* irq are locally disabled here */
	spin_lock(&rtc_lock);
	/* Tell the clock it's being set */
	save_control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

	/* Stop and reset prescaler */
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		cmos_minutes = bcd2bin(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
		/* correct for half hour time zone */
		real_minutes += 30;
	}
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			real_seconds = bin2bcd(real_seconds);
			real_minutes = bin2bcd(real_minutes);
		}
		CMOS_WRITE(real_seconds,RTC_SECONDS);
		CMOS_WRITE(real_minutes,RTC_MINUTES);
	} else {
		printk_once(KERN_NOTICE
			    "set_rtc_mmss: can't update from %d to %d\n",
			    cmos_minutes, real_minutes);
		retval = -1;
	}

	/* The following flags have to be released exactly in this order,
	 * otherwise the DS12887 (popular MC146818A clone with integrated
	 * battery and quartz) will not reset the oscillator and will not
	 * update precisely 500 ms later. You won't find this mentioned in
	 * the Dallas Semiconductor data sheets, but who believes data
	 * sheets anyway ...                           -- Markus Kuhn
	 */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);

	return retval;
}
gpl-2.0