repo_name
string
path
string
copies
string
size
string
content
string
license
string
SlimRoms/kernel_lge_hammerhead
arch/arm/mach-s5p64x0/setup-spi.c
4909
1344
/* linux/arch/arm/mach-s5p64x0/setup-spi.c * * Copyright (C) 2011 Samsung Electronics Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/io.h> #include <plat/gpio-cfg.h> #include <plat/cpu.h> #include <plat/s3c64xx-spi.h> #ifdef CONFIG_S3C64XX_DEV_SPI0 struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = { .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, .tx_st_done = 25, }; int s3c64xx_spi0_cfg_gpio(struct platform_device *dev) { if (soc_is_s5p6450()) s3c_gpio_cfgall_range(S5P6450_GPC(0), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); else s3c_gpio_cfgall_range(S5P6440_GPC(0), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI1 struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, .tx_st_done = 25, }; int s3c64xx_spi1_cfg_gpio(struct platform_device *dev) { if (soc_is_s5p6450()) s3c_gpio_cfgall_range(S5P6450_GPC(4), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); else s3c_gpio_cfgall_range(S5P6440_GPC(4), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif
gpl-2.0
SlimRoms/kernel_asus_flo
drivers/cpufreq/acpi-cpufreq.c
5421
19612
/* * acpi-cpufreq.c - ACPI Processor P-States Driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/compiler.h> #include <linux/dmi.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <acpi/processor.h> #include <asm/msr.h> #include <asm/processor.h> #include <asm/cpufeature.h> #include "mperf.h" MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); enum { UNDEFINED_CAPABLE = 0, SYSTEM_INTEL_MSR_CAPABLE, SYSTEM_IO_CAPABLE, }; #define INTEL_MSR_RANGE (0xffff) struct acpi_cpufreq_data { struct acpi_processor_performance *acpi_data; struct cpufreq_frequency_table *freq_table; unsigned int resume; unsigned int cpu_feature; }; static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); /* acpi_perf_data is a pointer to percpu data. 
*/ static struct acpi_processor_performance __percpu *acpi_perf_data; static struct cpufreq_driver acpi_cpufreq_driver; static unsigned int acpi_pstate_strict; static int check_est_cpu(unsigned int cpuid) { struct cpuinfo_x86 *cpu = &cpu_data(cpuid); return cpu_has(cpu, X86_FEATURE_EST); } static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) { struct acpi_processor_performance *perf; int i; perf = data->acpi_data; for (i = 0; i < perf->state_count; i++) { if (value == perf->states[i].status) return data->freq_table[i].frequency; } return 0; } static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) { int i; struct acpi_processor_performance *perf; msr &= INTEL_MSR_RANGE; perf = data->acpi_data; for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { if (msr == perf->states[data->freq_table[i].index].status) return data->freq_table[i].frequency; } return data->freq_table[0].frequency; } static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) { switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: return extract_msr(val, data); case SYSTEM_IO_CAPABLE: return extract_io(val, data); default: return 0; } } struct msr_addr { u32 reg; }; struct io_addr { u16 port; u8 bit_width; }; struct drv_cmd { unsigned int type; const struct cpumask *mask; union { struct msr_addr msr; struct io_addr io; } addr; u32 val; }; /* Called via smp_call_function_single(), on the target CPU */ static void do_drv_read(void *_cmd) { struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: rdmsr(cmd->addr.msr.reg, cmd->val, h); break; case SYSTEM_IO_CAPABLE: acpi_os_read_port((acpi_io_address)cmd->addr.io.port, &cmd->val, (u32)cmd->addr.io.bit_width); break; default: break; } } /* Called via smp_call_function_many(), on the target CPUs */ static void do_drv_write(void *_cmd) { struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: rdmsr(cmd->addr.msr.reg, lo, hi); lo 
= (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); wrmsr(cmd->addr.msr.reg, lo, hi); break; case SYSTEM_IO_CAPABLE: acpi_os_write_port((acpi_io_address)cmd->addr.io.port, cmd->val, (u32)cmd->addr.io.bit_width); break; default: break; } } static void drv_read(struct drv_cmd *cmd) { int err; cmd->val = 0; err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ } static void drv_write(struct drv_cmd *cmd) { int this_cpu; this_cpu = get_cpu(); if (cpumask_test_cpu(this_cpu, cmd->mask)) do_drv_write(cmd); smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); put_cpu(); } static u32 get_cur_val(const struct cpumask *mask) { struct acpi_processor_performance *perf; struct drv_cmd cmd; if (unlikely(cpumask_empty(mask))) return 0; switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; break; default: return 0; } cmd.mask = mask; drv_read(&cmd); pr_debug("get_cur_val = %u\n", cmd.val); return cmd.val; } static unsigned int get_cur_freq_on_cpu(unsigned int cpu) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); unsigned int freq; unsigned int cached_freq; pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { return 0; } cached_freq = data->freq_table[data->acpi_data->state].frequency; freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); if (freq != cached_freq) { /* * The dreaded BIOS frequency change behind our back. * Force set the frequency on next target call. 
*/ data->resume = 1; } pr_debug("cur freq = %u\n", freq); return freq; } static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, struct acpi_cpufreq_data *data) { unsigned int cur_freq; unsigned int i; for (i = 0; i < 100; i++) { cur_freq = extract_freq(get_cur_val(mask), data); if (cur_freq == freq) return 1; udelay(10); } return 0; } static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); struct acpi_processor_performance *perf; struct cpufreq_freqs freqs; struct drv_cmd cmd; unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ unsigned int i; int result = 0; pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { return -ENODEV; } perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); if (unlikely(result)) { result = -ENODEV; goto out; } next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { pr_debug("Called after resume, resetting to P%d\n", next_perf_state); data->resume = 0; } else { pr_debug("Already at target state (P%d)\n", next_perf_state); goto out; } } switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_CTL; cmd.val = (u32) perf->states[next_perf_state].control; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; cmd.val = (u32) perf->states[next_perf_state].control; break; default: result = -ENODEV; goto out; } /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != 
CPUFREQ_SHARED_TYPE_ANY) cmd.mask = policy->cpus; else cmd.mask = cpumask_of(policy->cpu); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } drv_write(&cmd); if (acpi_pstate_strict) { if (!check_freqs(cmd.mask, freqs.new, data)) { pr_debug("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; goto out; } } for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } perf->state = next_perf_state; out: return result; } static int acpi_cpufreq_verify(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); pr_debug("acpi_cpufreq_verify\n"); return cpufreq_frequency_table_verify(policy, data->freq_table); } static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) { struct acpi_processor_performance *perf = data->acpi_data; if (cpu_khz) { /* search the closest match to cpu_khz */ unsigned int i; unsigned long freq; unsigned long freqn = perf->states[0].core_frequency * 1000; for (i = 0; i < (perf->state_count-1); i++) { freq = freqn; freqn = perf->states[i+1].core_frequency * 1000; if ((2 * cpu_khz) > (freqn + freq)) { perf->state = i; return freq; } } perf->state = perf->state_count-1; return freqn; } else { /* assume CPU is at P0... */ perf->state = 0; return perf->states[0].core_frequency * 1000; } } static void free_acpi_perf_data(void) { unsigned int i; /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ for_each_possible_cpu(i) free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) ->shared_cpu_map); free_percpu(acpi_perf_data); } /* * acpi_cpufreq_early_init - initialize ACPI P-States library * * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) * in order to determine correct frequency and voltage pairings. 
We can * do _PDC and _PSD and find out the processor dependency for the * actual init that will happen later... */ static int __init acpi_cpufreq_early_init(void) { unsigned int i; pr_debug("acpi_cpufreq_early_init\n"); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); if (!acpi_perf_data) { pr_debug("Memory allocation error for acpi_perf_data.\n"); return -ENOMEM; } for_each_possible_cpu(i) { if (!zalloc_cpumask_var_node( &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, GFP_KERNEL, cpu_to_node(i))) { /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ free_acpi_perf_data(); return -ENOMEM; } } /* Do initialization in ACPI core */ acpi_processor_preregister_performance(acpi_perf_data); return 0; } #ifdef CONFIG_SMP /* * Some BIOSes do SW_ANY coordination internally, either set it up in hw * or do it in BIOS firmware and won't inform about it to OS. If not * detected, this has a side effect of making CPU run at a different speed * than OS intended it to run at. Detect it and handle it cleanly. */ static int bios_with_sw_any_bug; static int sw_any_bug_found(const struct dmi_system_id *d) { bios_with_sw_any_bug = 1; return 0; } static const struct dmi_system_id sw_any_bug_dmi_table[] = { { .callback = sw_any_bug_found, .ident = "Supermicro Server X6DLP", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), DMI_MATCH(DMI_BIOS_VERSION, "080010"), DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), }, }, { } }; static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) { /* Intel Xeon Processor 7100 Series Specification Update * http://www.intel.com/Assets/PDF/specupdate/314554.pdf * AL30: A Machine Check Exception (MCE) Occurring during an * Enhanced Intel SpeedStep Technology Ratio Change May Cause * Both Processor Cores to Lock Up. 
*/ if (c->x86_vendor == X86_VENDOR_INTEL) { if ((c->x86 == 15) && (c->x86_model == 6) && (c->x86_mask == 8)) { printk(KERN_INFO "acpi-cpufreq: Intel(R) " "Xeon(R) 7100 Errata AL30, processors may " "lock up on frequency changes: disabling " "acpi-cpufreq.\n"); return -ENODEV; } } return 0; } #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) { unsigned int i; unsigned int valid_states = 0; unsigned int cpu = policy->cpu; struct acpi_cpufreq_data *data; unsigned int result = 0; struct cpuinfo_x86 *c = &cpu_data(policy->cpu); struct acpi_processor_performance *perf; #ifdef CONFIG_SMP static int blacklisted; #endif pr_debug("acpi_cpufreq_cpu_init\n"); #ifdef CONFIG_SMP if (blacklisted) return blacklisted; blacklisted = acpi_cpufreq_blacklist(c); if (blacklisted) return blacklisted; #endif data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); if (!data) return -ENOMEM; data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); per_cpu(acfreq_data, cpu) = data; if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; result = acpi_processor_register_performance(data->acpi_data, cpu); if (result) goto err_free; perf = data->acpi_data; policy->shared_type = perf->shared_type; /* * Will let policy->cpus know about dependency only when software * coordination is required. 
*/ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { cpumask_copy(policy->cpus, perf->shared_cpu_map); } cpumask_copy(policy->related_cpus, perf->shared_cpu_map); #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) { policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; cpumask_copy(policy->cpus, cpu_core_mask(cpu)); } #endif /* capability check */ if (perf->state_count <= 1) { pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } if (perf->control_register.space_id != perf->status_register.space_id) { result = -ENODEV; goto err_unreg; } switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: pr_debug("SYSTEM IO addr space\n"); data->cpu_feature = SYSTEM_IO_CAPABLE; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: pr_debug("HARDWARE addr space\n"); if (!check_est_cpu(cpu)) { result = -ENODEV; goto err_unreg; } data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; break; default: pr_debug("Unknown addr space %d\n", (u32) (perf->control_register.space_id)); result = -ENODEV; goto err_unreg; } data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count+1), GFP_KERNEL); if (!data->freq_table) { result = -ENOMEM; goto err_unreg; } /* detect transition latency */ policy->cpuinfo.transition_latency = 0; for (i = 0; i < perf->state_count; i++) { if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; } /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && policy->cpuinfo.transition_latency > 20 * 1000) { policy->cpuinfo.transition_latency = 20 * 1000; printk_once(KERN_INFO "P-state transition latency capped at 20 uS\n"); } /* table init */ for (i = 0; i < perf->state_count; i++) { if (i > 0 && perf->states[i].core_frequency >= 
data->freq_table[valid_states-1].frequency / 1000) continue; data->freq_table[valid_states].index = i; data->freq_table[valid_states].frequency = perf->states[i].core_frequency * 1000; valid_states++; } data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; perf->state = 0; result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); if (result) goto err_freqfree; if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: /* Current speed is unknown and not detectable by IO port */ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); break; case ACPI_ADR_SPACE_FIXED_HARDWARE: acpi_cpufreq_driver.get = get_cur_freq_on_cpu; policy->cur = get_cur_freq_on_cpu(cpu); break; default: break; } /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); /* Check for APERF/MPERF support in hardware */ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; pr_debug("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? '*' : ' '), i, (u32) perf->states[i].core_frequency, (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); /* * the first call to ->target() should result in us actually * writing something to the appropriate registers. 
*/ data->resume = 1; return result; err_freqfree: kfree(data->freq_table); err_unreg: acpi_processor_unregister_performance(perf, cpu); err_free: kfree(data); per_cpu(acfreq_data, cpu) = NULL; return result; } static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); per_cpu(acfreq_data, policy->cpu) = NULL; acpi_processor_unregister_performance(data->acpi_data, policy->cpu); kfree(data->freq_table); kfree(data); } return 0; } static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); pr_debug("acpi_cpufreq_resume\n"); data->resume = 1; return 0; } static struct freq_attr *acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver acpi_cpufreq_driver = { .verify = acpi_cpufreq_verify, .target = acpi_cpufreq_target, .bios_limit = acpi_processor_get_bios_limit, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .resume = acpi_cpufreq_resume, .name = "acpi-cpufreq", .owner = THIS_MODULE, .attr = acpi_cpufreq_attr, }; static int __init acpi_cpufreq_init(void) { int ret; if (acpi_disabled) return 0; pr_debug("acpi_cpufreq_init\n"); ret = acpi_cpufreq_early_init(); if (ret) return ret; ret = cpufreq_register_driver(&acpi_cpufreq_driver); if (ret) free_acpi_perf_data(); return ret; } static void __exit acpi_cpufreq_exit(void) { pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); free_acpi_perf_data(); } module_param(acpi_pstate_strict, uint, 0644); MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are " "performed during frequency changes."); late_initcall(acpi_cpufreq_init); module_exit(acpi_cpufreq_exit); MODULE_ALIAS("acpi");
gpl-2.0
lucatib/a33_linux
drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
7981
17860
#include "ieee80211.h" #include <linux/etherdevice.h> #include <linux/slab.h> #include "rtl819x_TS.h" void TsSetupTimeOut(unsigned long data) { // Not implement yet // This is used for WMMSA and ACM , that would send ADDTSReq frame. } void TsInactTimeout(unsigned long data) { // Not implement yet // This is used for WMMSA and ACM. // This function would be call when TS is no Tx/Rx for some period of time. } /******************************************************************************************************************** *function: I still not understand this function, so wait for further implementation * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer * return: NULL * notice: ********************************************************************************************************************/ void RxPktPendingTimeout(unsigned long data) { PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data; struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]); PRX_REORDER_ENTRY pReorderEntry = NULL; //u32 flags = 0; unsigned long flags = 0; struct ieee80211_rxb *stats_IndicateArray[REORDER_WIN_SIZE]; u8 index = 0; bool bPktInBuf = false; spin_lock_irqsave(&(ieee->reorder_spinlock), flags); //PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK); IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__FUNCTION__); if(pRxTs->RxTimeoutIndicateSeq != 0xffff) { // Indicate the pending packets sequentially according to SeqNum until meet the gap. 
while(!list_empty(&pRxTs->RxPendingPktList)) { pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->RxPendingPktList.prev,RX_REORDER_ENTRY,List); if(index == 0) pRxTs->RxIndicateSeq = pReorderEntry->SeqNum; if( SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) || SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ) { list_del_init(&pReorderEntry->List); if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)) pRxTs->RxIndicateSeq = (pRxTs->RxIndicateSeq + 1) % 4096; IEEE80211_DEBUG(IEEE80211_DL_REORDER,"RxPktPendingTimeout(): IndicateSeq: %d\n", pReorderEntry->SeqNum); stats_IndicateArray[index] = pReorderEntry->prxb; index++; list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List); } else { bPktInBuf = true; break; } } } if(index>0) { // Set RxTimeoutIndicateSeq to 0xffff to indicate no pending packets in buffer now. pRxTs->RxTimeoutIndicateSeq = 0xffff; // Indicate packets if(index > REORDER_WIN_SIZE){ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! 
\n"); spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } ieee80211_indicate_packets(ieee, stats_IndicateArray, index); } if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff)) { pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq; mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime)); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK); } /******************************************************************************************************************** *function: Add BA timer function * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer * return: NULL * notice: ********************************************************************************************************************/ void TsAddBaProcess(unsigned long data) { PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data; u8 num = pTxTs->num; struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[num]); TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false); IEEE80211_DEBUG(IEEE80211_DL_BA, "TsAddBaProcess(): ADDBA Req is started!! \n"); } void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo) { memset(pTsCommonInfo->Addr, 0, 6); memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY)); memset(&pTsCommonInfo->TClass, 0, sizeof(QOS_TCLAS)*TCLAS_NUM); pTsCommonInfo->TClasProc = 0; pTsCommonInfo->TClasNum = 0; } void ResetTxTsEntry(PTX_TS_RECORD pTS) { ResetTsCommonInfo(&pTS->TsCommonInfo); pTS->TxCurSeq = 0; pTS->bAddBaReqInProgress = false; pTS->bAddBaReqDelayed = false; pTS->bUsingBa = false; ResetBaEntry(&pTS->TxAdmittedBARecord); //For BA Originator ResetBaEntry(&pTS->TxPendingBARecord); } void ResetRxTsEntry(PRX_TS_RECORD pTS) { ResetTsCommonInfo(&pTS->TsCommonInfo); pTS->RxIndicateSeq = 0xffff; // This indicate the RxIndicateSeq is not used now!! pTS->RxTimeoutIndicateSeq = 0xffff; // This indicate the RxTimeoutIndicateSeq is not used now!! 
ResetBaEntry(&pTS->RxAdmittedBARecord); // For BA Recipient } void TSInitialize(struct ieee80211_device *ieee) { PTX_TS_RECORD pTxTS = ieee->TxTsRecord; PRX_TS_RECORD pRxTS = ieee->RxTsRecord; PRX_REORDER_ENTRY pRxReorderEntry = ieee->RxReorderEntry; u8 count = 0; IEEE80211_DEBUG(IEEE80211_DL_TS, "==========>%s()\n", __FUNCTION__); // Initialize Tx TS related info. INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List); INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List); INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List); for(count = 0; count < TOTAL_TS_NUM; count++) { // pTxTS->num = count; // The timers for the operation of Traffic Stream and Block Ack. // DLS related timer will be add here in the future!! init_timer(&pTxTS->TsCommonInfo.SetupTimer); pTxTS->TsCommonInfo.SetupTimer.data = (unsigned long)pTxTS; pTxTS->TsCommonInfo.SetupTimer.function = TsSetupTimeOut; init_timer(&pTxTS->TsCommonInfo.InactTimer); pTxTS->TsCommonInfo.InactTimer.data = (unsigned long)pTxTS; pTxTS->TsCommonInfo.InactTimer.function = TsInactTimeout; init_timer(&pTxTS->TsAddBaTimer); pTxTS->TsAddBaTimer.data = (unsigned long)pTxTS; pTxTS->TsAddBaTimer.function = TsAddBaProcess; init_timer(&pTxTS->TxPendingBARecord.Timer); pTxTS->TxPendingBARecord.Timer.data = (unsigned long)pTxTS; pTxTS->TxPendingBARecord.Timer.function = BaSetupTimeOut; init_timer(&pTxTS->TxAdmittedBARecord.Timer); pTxTS->TxAdmittedBARecord.Timer.data = (unsigned long)pTxTS; pTxTS->TxAdmittedBARecord.Timer.function = TxBaInactTimeout; ResetTxTsEntry(pTxTS); list_add_tail(&pTxTS->TsCommonInfo.List, &ieee->Tx_TS_Unused_List); pTxTS++; } // Initialize Rx TS related info. 
INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List); INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List); INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List); for(count = 0; count < TOTAL_TS_NUM; count++) { pRxTS->num = count; INIT_LIST_HEAD(&pRxTS->RxPendingPktList); init_timer(&pRxTS->TsCommonInfo.SetupTimer); pRxTS->TsCommonInfo.SetupTimer.data = (unsigned long)pRxTS; pRxTS->TsCommonInfo.SetupTimer.function = TsSetupTimeOut; init_timer(&pRxTS->TsCommonInfo.InactTimer); pRxTS->TsCommonInfo.InactTimer.data = (unsigned long)pRxTS; pRxTS->TsCommonInfo.InactTimer.function = TsInactTimeout; init_timer(&pRxTS->RxAdmittedBARecord.Timer); pRxTS->RxAdmittedBARecord.Timer.data = (unsigned long)pRxTS; pRxTS->RxAdmittedBARecord.Timer.function = RxBaInactTimeout; init_timer(&pRxTS->RxPktPendingTimer); pRxTS->RxPktPendingTimer.data = (unsigned long)pRxTS; pRxTS->RxPktPendingTimer.function = RxPktPendingTimeout; ResetRxTsEntry(pRxTS); list_add_tail(&pRxTS->TsCommonInfo.List, &ieee->Rx_TS_Unused_List); pRxTS++; } // Initialize unused Rx Reorder List. 
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List); //#ifdef TO_DO_LIST for(count = 0; count < REORDER_ENTRY_NUM; count++) { list_add_tail( &pRxReorderEntry->List,&ieee->RxReorder_Unused_List); if(count == (REORDER_ENTRY_NUM-1)) break; pRxReorderEntry = &ieee->RxReorderEntry[count+1]; } //#endif } void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 InactTime) { del_timer_sync(&pTsCommonInfo->SetupTimer); del_timer_sync(&pTsCommonInfo->InactTimer); if(InactTime!=0) mod_timer(&pTsCommonInfo->InactTimer, jiffies + MSECS(InactTime)); } PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8 TID, TR_SELECT TxRxSelect) { //DIRECTION_VALUE dir; u8 dir; bool search_dir[4] = {0, 0, 0, 0}; struct list_head* psearch_list; //FIXME PTS_COMMON_INFO pRet = NULL; if(ieee->iw_mode == IW_MODE_MASTER) //ap mode { if(TxRxSelect == TX_DIR) { search_dir[DIR_DOWN] = true; search_dir[DIR_BI_DIR]= true; } else { search_dir[DIR_UP] = true; search_dir[DIR_BI_DIR]= true; } } else if(ieee->iw_mode == IW_MODE_ADHOC) { if(TxRxSelect == TX_DIR) search_dir[DIR_UP] = true; else search_dir[DIR_DOWN] = true; } else { if(TxRxSelect == TX_DIR) { search_dir[DIR_UP] = true; search_dir[DIR_BI_DIR]= true; search_dir[DIR_DIRECT]= true; } else { search_dir[DIR_DOWN] = true; search_dir[DIR_BI_DIR]= true; search_dir[DIR_DIRECT]= true; } } if(TxRxSelect == TX_DIR) psearch_list = &ieee->Tx_TS_Admit_List; else psearch_list = &ieee->Rx_TS_Admit_List; //for(dir = DIR_UP; dir <= DIR_BI_DIR; dir++) for(dir = 0; dir <= DIR_BI_DIR; dir++) { if(search_dir[dir] ==false ) continue; list_for_each_entry(pRet, psearch_list, List){ // IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.f.TSInfo.field.ucTSID, pRet->TSpec.f.TSInfo.field.ucDirection); if (memcmp(pRet->Addr, Addr, 6) == 0) if (pRet->TSpec.f.TSInfo.field.ucTSID == TID) if(pRet->TSpec.f.TSInfo.field.ucDirection == dir) { // printk("Bingo! 
got it\n"); break; } } if(&pRet->List != psearch_list) break; } if(&pRet->List != psearch_list){ return pRet ; } else return NULL; } void MakeTSEntry( PTS_COMMON_INFO pTsCommonInfo, u8* Addr, PTSPEC_BODY pTSPEC, PQOS_TCLAS pTCLAS, u8 TCLAS_Num, u8 TCLAS_Proc ) { u8 count; if(pTsCommonInfo == NULL) return; memcpy(pTsCommonInfo->Addr, Addr, 6); if(pTSPEC != NULL) memcpy((u8*)(&(pTsCommonInfo->TSpec)), (u8*)pTSPEC, sizeof(TSPEC_BODY)); for(count = 0; count < TCLAS_Num; count++) memcpy((u8*)(&(pTsCommonInfo->TClass[count])), (u8*)pTCLAS, sizeof(QOS_TCLAS)); pTsCommonInfo->TClasProc = TCLAS_Proc; pTsCommonInfo->TClasNum = TCLAS_Num; } bool GetTs( struct ieee80211_device* ieee, PTS_COMMON_INFO *ppTS, u8* Addr, u8 TID, TR_SELECT TxRxSelect, //Rx:1, Tx:0 bool bAddNewTs ) { u8 UP = 0; // // We do not build any TS for Broadcast or Multicast stream. // So reject these kinds of search here. // if(is_broadcast_ether_addr(Addr) || is_multicast_ether_addr(Addr)) { IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n"); return false; } if (ieee->current_network.qos_data.supported == 0) UP = 0; else { // In WMM case: we use 4 TID only if (!IsACValid(TID)) { IEEE80211_DEBUG(IEEE80211_DL_ERR, " in %s(), TID(%d) is not valid\n", __FUNCTION__, TID); return false; } switch(TID) { case 0: case 3: UP = 0; break; case 1: case 2: UP = 2; break; case 4: case 5: UP = 5; break; case 6: case 7: UP = 7; break; } } *ppTS = SearchAdmitTRStream( ieee, Addr, UP, TxRxSelect); if(*ppTS != NULL) { return true; } else { if(bAddNewTs == false) { IEEE80211_DEBUG(IEEE80211_DL_TS, "add new TS failed(tid:%d)\n", UP); return false; } else { // // Create a new Traffic stream for current Tx/Rx // This is for EDCA and WMM to add a new TS. // For HCCA or WMMSA, TS cannot be addmit without negotiation. // TSPEC_BODY TSpec; PQOS_TSINFO pTSInfo = &TSpec.f.TSInfo; struct list_head* pUnusedList = (TxRxSelect == TX_DIR)? 
(&ieee->Tx_TS_Unused_List): (&ieee->Rx_TS_Unused_List); struct list_head* pAddmitList = (TxRxSelect == TX_DIR)? (&ieee->Tx_TS_Admit_List): (&ieee->Rx_TS_Admit_List); DIRECTION_VALUE Dir = (ieee->iw_mode == IW_MODE_MASTER)? ((TxRxSelect==TX_DIR)?DIR_DOWN:DIR_UP): ((TxRxSelect==TX_DIR)?DIR_UP:DIR_DOWN); IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n"); if(!list_empty(pUnusedList)) { (*ppTS) = list_entry(pUnusedList->next, TS_COMMON_INFO, List); list_del_init(&(*ppTS)->List); if(TxRxSelect==TX_DIR) { PTX_TS_RECORD tmp = container_of(*ppTS, TX_TS_RECORD, TsCommonInfo); ResetTxTsEntry(tmp); } else{ PRX_TS_RECORD tmp = container_of(*ppTS, RX_TS_RECORD, TsCommonInfo); ResetRxTsEntry(tmp); } IEEE80211_DEBUG(IEEE80211_DL_TS, "to init current TS, UP:%d, Dir:%d, addr:%pM\n", UP, Dir, Addr); // Prepare TS Info releated field pTSInfo->field.ucTrafficType = 0; // Traffic type: WMM is reserved in this field pTSInfo->field.ucTSID = UP; // TSID pTSInfo->field.ucDirection = Dir; // Direction: if there is DirectLink, this need additional consideration. pTSInfo->field.ucAccessPolicy = 1; // Access policy pTSInfo->field.ucAggregation = 0; // Aggregation pTSInfo->field.ucPSB = 0; // Aggregation pTSInfo->field.ucUP = UP; // User priority pTSInfo->field.ucTSInfoAckPolicy = 0; // Ack policy pTSInfo->field.ucSchedule = 0; // Schedule MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0); AdmitTS(ieee, *ppTS, 0); list_add_tail(&((*ppTS)->List), pAddmitList); // if there is DirectLink, we need to do additional operation here!! 
return true; } else { IEEE80211_DEBUG(IEEE80211_DL_ERR, "in function %s() There is not enough TS record to be used!!", __FUNCTION__); return false; } } } } void RemoveTsEntry( struct ieee80211_device* ieee, PTS_COMMON_INFO pTs, TR_SELECT TxRxSelect ) { //u32 flags = 0; unsigned long flags = 0; del_timer_sync(&pTs->SetupTimer); del_timer_sync(&pTs->InactTimer); TsInitDelBA(ieee, pTs, TxRxSelect); if(TxRxSelect == RX_DIR) { //#ifdef TO_DO_LIST PRX_REORDER_ENTRY pRxReorderEntry; PRX_TS_RECORD pRxTS = (PRX_TS_RECORD)pTs; if(timer_pending(&pRxTS->RxPktPendingTimer)) del_timer_sync(&pRxTS->RxPktPendingTimer); while(!list_empty(&pRxTS->RxPendingPktList)) { // PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK); spin_lock_irqsave(&(ieee->reorder_spinlock), flags); //pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List); pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List); list_del_init(&pRxReorderEntry->List); { int i = 0; struct ieee80211_rxb * prxb = pRxReorderEntry->prxb; if (unlikely(!prxb)) { spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List); //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK); spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); } //#endif } else { PTX_TS_RECORD pTxTS = (PTX_TS_RECORD)pTs; del_timer_sync(&pTxTS->TsAddBaTimer); } } void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr) { PTS_COMMON_INFO pTS, pTmpTS; printk("===========>RemovePeerTS,%pM\n", Addr); list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) { if (memcmp(pTS->Addr, Addr, 6) == 0) { RemoveTsEntry(ieee, pTS, TX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List); } } list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) { if (memcmp(pTS->Addr, 
Addr, 6) == 0) { printk("====>remove Tx_TS_admin_list\n"); RemoveTsEntry(ieee, pTS, TX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List); } } list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) { if (memcmp(pTS->Addr, Addr, 6) == 0) { RemoveTsEntry(ieee, pTS, RX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List); } } list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) { if (memcmp(pTS->Addr, Addr, 6) == 0) { RemoveTsEntry(ieee, pTS, RX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List); } } } void RemoveAllTS(struct ieee80211_device* ieee) { PTS_COMMON_INFO pTS, pTmpTS; list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) { RemoveTsEntry(ieee, pTS, TX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List); } list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) { RemoveTsEntry(ieee, pTS, TX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List); } list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) { RemoveTsEntry(ieee, pTS, RX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List); } list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) { RemoveTsEntry(ieee, pTS, RX_DIR); list_del_init(&pTS->List); list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List); } } void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS) { if(pTxTS->bAddBaReqInProgress == false) { pTxTS->bAddBaReqInProgress = true; if(pTxTS->bAddBaReqDelayed) { IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n"); mod_timer(&pTxTS->TsAddBaTimer, jiffies + MSECS(TS_ADDBA_DELAY)); } else { IEEE80211_DEBUG(IEEE80211_DL_BA,"TsStartAddBaProcess(): Immediately Start ADDBA now!!\n"); mod_timer(&pTxTS->TsAddBaTimer, jiffies+10); //set 10 ticks } } else 
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __FUNCTION__); }
gpl-2.0
lyfkevin/Wind_iproj_JB_kernel
arch/microblaze/pci/xilinx_pci.c
7981
4579
/*
 * PCI support for Xilinx plbv46_pci soft-core which can be used on
 * Xilinx Virtex ML410 / ML510 boards.
 *
 * Copyright 2009 Roderick Colenbrander
 * Copyright 2009 Secret Lab Technologies Ltd.
 *
 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
 * by Benjamin Herrenschmidt.
 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <asm/io.h>

/* Offsets of the indirect config-access registers in the bridge's
 * register block. */
#define XPLB_PCI_ADDR 0x10c
#define XPLB_PCI_DATA 0x110
#define XPLB_PCI_BUS  0x114

#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
			     PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)

static struct of_device_id xilinx_pci_match[] = {
	{ .compatible = "xlnx,plbv46-pci-1.03.a", },
	{}
};

/**
 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
 *
 * Runs only for the host bridge itself (devfn 0 on the root bus) and
 * only when that bridge matches xilinx_pci_match.
 */
static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
{
	struct pci_controller *hose;
	int i;

	if (dev->devfn || dev->bus->self)
		return;

	hose = pci_bus_to_host(dev->bus);
	if (!hose)
		return;

	if (!of_match_node(xilinx_pci_match, hose->dn))
		return;

	/* Hide the PCI host BARs from the kernel as their content doesn't
	 * fit well in the resource management
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dev->resource[i].start = 0;
		dev->resource[i].end = 0;
		dev->resource[i].flags = 0;
	}

	dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
		 pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);

#ifdef DEBUG
/**
 * xilinx_pci_exclude_device - Don't do config access for non-root bus
 *
 * This is a hack. Config access to any bus other than bus 0 does not
 * currently work on the ML510 so we prevent it here.
 */
static int
xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
{
	return (bus != 0);
}

/**
 * xilinx_early_pci_scan - List pci config space for available devices
 *
 * List pci devices in very early phase.
 */
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
	u32 bus = 0;
	u32 val, dev, func, offset;

	/* Currently we have only 2 device connected - up-to 32 devices */
	for (dev = 0; dev < 2; dev++) {
		/* List only first function number - up-to 8 functions */
		for (func = 0; func < 1; func++) {
			printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
			/* read the first 64 standardized bytes */
			/* Up-to 192 bytes can be list of capabilities */
			for (offset = 0; offset < 64; offset += 4) {
				early_read_config_dword(hose, bus,
					PCI_DEVFN(dev, func), offset, &val);
				if (offset == 0 && val == 0xFFFFFFFF) {
					printk(KERN_CONT "\nABSENT");
					break;
				}
				if (!(offset % 0x10))
					printk(KERN_CONT "\n%04x: ", offset);

				printk(KERN_CONT "%08x ", val);
			}
			printk(KERN_INFO "\n");
		}
	}
}
#else
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
}
#endif

/**
 * xilinx_pci_init - Find and register a Xilinx PCI host bridge
 */
void __init xilinx_pci_init(void)
{
	struct pci_controller *hose;
	struct resource r;
	void __iomem *pci_reg;
	struct device_node *pci_node;

	pci_node = of_find_matching_node(NULL, xilinx_pci_match);
	if (!pci_node)
		return;

	if (of_address_to_resource(pci_node, 0, &r)) {
		pr_err("xilinx-pci: cannot resolve base address\n");
		goto out_put_node;
	}

	hose = pcibios_alloc_controller(pci_node);
	if (!hose) {
		pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
		goto out_put_node;
	}

	/* Setup config space */
	setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
			   r.start + XPLB_PCI_DATA,
			   INDIRECT_TYPE_SET_CFG_TYPE);

	/* According to the xilinx plbv46_pci documentation the soft-core starts
	 * a self-init when the bus master enable bit is set. Without this bit
	 * set the pci bus can't be scanned.
	 */
	early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);

	/* Set the max latency timer to 255 */
	early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);

	/* Set the max bus number to 255, and bus/subbus no's to 0 */
	pci_reg = of_iomap(pci_node, 0);
	if (!pci_reg) {
		/* BUGFIX: of_iomap() can fail; the original dereferenced
		 * the result unconditionally (NULL pointer dereference). */
		pr_err("xilinx-pci: cannot map bridge registers\n");
		return;
	}
	out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
	iounmap(pci_reg);

	/* Register the host bridge with the linux kernel! */
	pci_process_bridge_OF_ranges(hose, pci_node,
				     INDIRECT_TYPE_SET_CFG_TYPE);

	pr_info("xilinx-pci: Registered PCI host bridge\n");
	xilinx_early_pci_scan(hose);
	return;

out_put_node:
	/* BUGFIX: drop the reference taken by of_find_matching_node() on
	 * the early-error paths (device-node refcount leak).  On success
	 * the reference is intentionally kept for the controller. */
	of_node_put(pci_node);
}
gpl-2.0
hiikezoe/android_kernel_asus_tf300t
kernel/power/suspend_time.c
8749
2724
/*
 * debugfs file to track time spent in suspend
 *
 * Copyright (c) 2011, Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>

/* Wall-clock timestamp captured on entry to suspend. */
static struct timespec suspend_time_before;
/* Histogram of suspend durations: bin n counts sleeps in [2^(n-1), 2^n) s. */
static unsigned int time_in_suspend_bins[32];

#ifdef CONFIG_DEBUG_FS
/* Dump the non-empty histogram bins to the debugfs seq_file. */
static int suspend_time_debug_show(struct seq_file *s, void *data)
{
	int bin;
	seq_printf(s, "time (secs) count\n");
	seq_printf(s, "------------------\n");
	for (bin = 0; bin < 32; bin++) {
		if (time_in_suspend_bins[bin] == 0)
			continue;
		seq_printf(s, "%4d - %4d %4u\n",
			bin ? 1 << (bin - 1) : 0, 1 << bin,
				time_in_suspend_bins[bin]);
	}
	return 0;
}

static int suspend_time_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, suspend_time_debug_show, NULL);
}

static const struct file_operations suspend_time_debug_fops = {
	.open		= suspend_time_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init suspend_time_debug_init(void)
{
	struct dentry *d;

	/* BUGFIX: mode was 0755 — execute/group-write bits are meaningless
	 * on a regular read-only debugfs file; 0444 matches what the
	 * seq_file interface actually supports. */
	d = debugfs_create_file("suspend_time", 0444, NULL, NULL,
		&suspend_time_debug_fops);
	if (!d) {
		pr_err("Failed to create suspend_time debug file\n");
		return -ENOMEM;
	}

	return 0;
}

late_initcall(suspend_time_debug_init);
#endif

/* Record the persistent clock just before the system sleeps. */
static int suspend_time_syscore_suspend(void)
{
	read_persistent_clock(&suspend_time_before);

	return 0;
}

/* On wake, compute the time spent asleep, bucket it by power of two,
 * and log it. */
static void suspend_time_syscore_resume(void)
{
	struct timespec after;

	read_persistent_clock(&after);

	after = timespec_sub(after, suspend_time_before);

	/* fls() maps a duration of s seconds to bin ceil(log2(s+1)). */
	time_in_suspend_bins[fls(after.tv_sec)]++;

	pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
		after.tv_nsec / NSEC_PER_MSEC);
}

static struct syscore_ops suspend_time_syscore_ops = {
	.suspend = suspend_time_syscore_suspend,
	.resume = suspend_time_syscore_resume,
};

static int suspend_time_syscore_init(void)
{
	register_syscore_ops(&suspend_time_syscore_ops);

	return 0;
}

static void suspend_time_syscore_exit(void)
{
	unregister_syscore_ops(&suspend_time_syscore_ops);
}
module_init(suspend_time_syscore_init);
module_exit(suspend_time_syscore_exit);
gpl-2.0
Phreya/phreya_kernel_oneplus_msm8974
drivers/net/wireless/orinoco/airport.c
9261
6252
/* airport.c
 *
 * A driver for "Hermes" chipset based Apple Airport wireless
 * card.
 *
 * Copyright notice & release notes in file main.c
 *
 * Note specific to airport stub:
 *
 * 0.05 : first version of the new split driver
 * 0.06 : fix possible hang on powerup, add sleep support
 */

#define DRIVER_NAME "airport"
#define PFX DRIVER_NAME ": "

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/pmac_feature.h>

#include "orinoco.h"

#define AIRPORT_IO_LEN	(0x1000)	/* one page */

/* Per-card state, stored in orinoco_private->card by alloc_orinocodev(). */
struct airport {
	struct macio_dev *mdev;
	void __iomem *vaddr;
	unsigned int irq;
	int irq_requested;	/* nonzero once request_irq() succeeded */
	int ndev_registered;	/* nonzero once orinoco_if_add() succeeded */
};

/* System-sleep hook: take the interface down, mask the IRQ, then ask the
 * PMU to cut power to the Airport slot. */
static int
airport_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
	struct net_device *dev = priv->ndev;
	struct airport *card = priv->card;
	unsigned long flags;
	int err;

	printk(KERN_DEBUG "%s: Airport entering sleep mode\n", dev->name);

	err = orinoco_lock(priv, &flags);
	if (err) {
		printk(KERN_ERR "%s: hw_unavailable on PBOOK_SLEEP_NOW\n",
		       dev->name);
		/* NOTE(review): still reports success so sleep proceeds even
		 * when the hardware was already unavailable. */
		return 0;
	}

	orinoco_down(priv);
	orinoco_unlock(priv, &flags);

	disable_irq(card->irq);
	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(mdev), 0, 0);

	return 0;
}

/* Resume hook: power the slot back up, wait for the hardware to settle,
 * re-enable the IRQ and bring the interface back up under the hw lock. */
static int
airport_resume(struct macio_dev *mdev)
{
	struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
	struct net_device *dev = priv->ndev;
	struct airport *card = priv->card;
	unsigned long flags;
	int err;

	printk(KERN_DEBUG "%s: Airport waking up\n", dev->name);

	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(mdev), 0, 1);
	msleep(200);	/* let the card finish powering up */

	enable_irq(card->irq);

	priv->hw.ops->lock_irqsave(&priv->lock, &flags);
	err = orinoco_up(priv);
	priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);

	return err;
}

/* Tear down everything airport_attach() set up, in reverse order.  Also
 * used as the failure path of airport_attach(), so each step is guarded
 * by its corresponding state flag / pointer check. */
static int
airport_detach(struct macio_dev *mdev)
{
	struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
	struct airport *card = priv->card;

	if (card->ndev_registered)
		orinoco_if_del(priv);
	card->ndev_registered = 0;

	if (card->irq_requested)
		free_irq(card->irq, priv);
	card->irq_requested = 0;

	if (card->vaddr)
		iounmap(card->vaddr);
	card->vaddr = NULL;

	macio_release_resource(mdev, 0);

	/* Power the slot down and give the PMU time to do it. */
	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(mdev), 0, 0);
	ssleep(1);

	macio_set_drvdata(mdev, NULL);

	free_orinocodev(priv);

	return 0;
}

static int airport_hard_reset(struct orinoco_private *priv)
{
	/* It would be nice to power cycle the Airport for a real hard
	 * reset, but for some reason although it appears to
	 * re-initialize properly, it falls in a screaming heap
	 * shortly afterwards. */
#if 0
	struct airport *card = priv->card;

	/* Vitally important.  If we don't do this it seems we get an
	 * interrupt somewhere during the power cycle, since
	 * hw_unavailable is already set it doesn't get ACKed, we get
	 * into an interrupt loop and the PMU decides to turn us
	 * off. */
	disable_irq(card->irq);

	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(card->mdev), 0, 0);
	ssleep(1);
	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(card->mdev), 0, 1);
	ssleep(1);

	enable_irq(card->irq);
	ssleep(1);
#endif

	return 0;
}

/* Probe callback: allocate driver state, map the register window, power
 * the card up, hook the IRQ and register the network interface.  Any
 * failure after resource acquisition unwinds through airport_detach(). */
static int
airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct orinoco_private *priv;
	struct airport *card;
	unsigned long phys_addr;
	struct hermes *hw;

	if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
		printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n");
		return -ENODEV;
	}

	/* Allocate space for private device-specific data */
	priv = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
				airport_hard_reset, NULL);
	if (!priv) {
		printk(KERN_ERR PFX "Cannot allocate network device\n");
		return -ENODEV;
	}
	card = priv->card;
	hw = &priv->hw;

	card->mdev = mdev;

	if (macio_request_resource(mdev, 0, DRIVER_NAME)) {
		printk(KERN_ERR PFX "can't request IO resource !\n");
		free_orinocodev(priv);
		return -EBUSY;
	}

	macio_set_drvdata(mdev, priv);

	/* Setup interrupts & base address */
	card->irq = macio_irq(mdev, 0);
	phys_addr = macio_resource_start(mdev, 0);	/* Physical address */
	printk(KERN_DEBUG PFX "Physical address %lx\n", phys_addr);
	card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN);
	if (!card->vaddr) {
		printk(KERN_ERR PFX "ioremap() failed\n");
		goto failed;
	}

	hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING);

	/* Power up card */
	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(mdev), 0, 1);
	ssleep(1);

	/* Reset it before we get the interrupt */
	hw->ops->init(hw);

	if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
		printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
		goto failed;
	}
	card->irq_requested = 1;

	/* Initialise the main driver */
	if (orinoco_init(priv) != 0) {
		printk(KERN_ERR PFX "orinoco_init() failed\n");
		goto failed;
	}

	/* Register an interface with the stack */
	if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
		goto failed;
	}
	card->ndev_registered = 1;
	return 0;
 failed:
	airport_detach(mdev);
	return -ENODEV;
}				/* airport_attach */

static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
	" (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
MODULE_LICENSE("Dual MPL/GPL");

static struct of_device_id airport_match[] = {
	{
	.name		= "radio",
	},
	{},
};

MODULE_DEVICE_TABLE(of, airport_match);

static struct macio_driver airport_driver = {
	.driver = {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= airport_match,
	},
	.probe		= airport_attach,
	.remove		= airport_detach,
	.suspend	= airport_suspend,
	.resume		= airport_resume,
};

static int __init
init_airport(void)
{
	printk(KERN_DEBUG "%s\n", version);

	return macio_register_driver(&airport_driver);
}

static void __exit
exit_airport(void)
{
	macio_unregister_driver(&airport_driver);
}

module_init(init_airport);
module_exit(exit_airport);
gpl-2.0
bagnz0r/GT-I8160_Kernel
drivers/clocksource/mmio.c
10797
1904
/*
 * Generic MMIO clocksource support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* A clocksource backed by a single memory-mapped counter register. */
struct clocksource_mmio {
	void __iomem *reg;
	struct clocksource clksrc;
};

static inline struct clocksource_mmio *clksrc_to_mmio(struct clocksource *c)
{
	return container_of(c, struct clocksource_mmio, clksrc);
}

/* Read a 32-bit register that counts upwards. */
cycle_t clocksource_mmio_readl_up(struct clocksource *c)
{
	return readl_relaxed(clksrc_to_mmio(c)->reg);
}

/* Read a 32-bit register that counts downwards (inverted so the
 * returned cycle value still increases). */
cycle_t clocksource_mmio_readl_down(struct clocksource *c)
{
	return ~readl_relaxed(clksrc_to_mmio(c)->reg);
}

/* Read a 16-bit register that counts upwards. */
cycle_t clocksource_mmio_readw_up(struct clocksource *c)
{
	return readw_relaxed(clksrc_to_mmio(c)->reg);
}

/* Read a 16-bit register that counts downwards (inverted). */
cycle_t clocksource_mmio_readw_down(struct clocksource *c)
{
	return ~(unsigned)readw_relaxed(clksrc_to_mmio(c)->reg);
}

/**
 * clocksource_mmio_init - Initialize a simple mmio based clocksource
 * @base:	Virtual address of the clock readout register
 * @name:	Name of the clocksource
 * @hz:		Frequency of the clocksource in Hz
 * @rating:	Rating of the clocksource
 * @bits:	Number of valid bits
 * @read:	One of clocksource_mmio_read*() above
 */
int __init clocksource_mmio_init(void __iomem *base, const char *name,
	unsigned long hz, int rating, unsigned bits,
	cycle_t (*read)(struct clocksource *))
{
	struct clocksource_mmio *mcs;

	/* Only 16..32 bit wide counters are supported. */
	if (bits < 16 || bits > 32)
		return -EINVAL;

	mcs = kzalloc(sizeof(*mcs), GFP_KERNEL);
	if (mcs == NULL)
		return -ENOMEM;

	mcs->reg = base;
	mcs->clksrc.name = name;
	mcs->clksrc.rating = rating;
	mcs->clksrc.read = read;
	mcs->clksrc.mask = CLOCKSOURCE_MASK(bits);
	mcs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;

	return clocksource_register_hz(&mcs->clksrc, hz);
}
gpl-2.0
drxaero/linux
drivers/infiniband/core/agent.c
11053
6380
/*
 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004, 2005 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004-2007 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/string.h>

#include "agent.h"
#include "smi.h"
#include "mad_priv.h"

#define SPFX "ib_agent: "

/* Per-port state: agent[0] is the send-only SMI (QP0) agent — only
 * created for InfiniBand link layers — and agent[1] the GSI (QP1) agent. */
struct ib_agent_port_private {
	struct list_head port_list;
	struct ib_mad_agent *agent[2];
};

static DEFINE_SPINLOCK(ib_agent_port_list_lock);
static LIST_HEAD(ib_agent_port_list);

/* Look up the port entry for (device, port_num).  Caller must hold
 * ib_agent_port_list_lock.  Returns NULL when no entry matches. */
static struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *entry;

	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
		/* agent[1] (GSI) always exists, so it is used as the key. */
		if (entry->agent[1]->device == device &&
		    entry->agent[1]->port_num == port_num)
			return entry;
	}
	return NULL;
}

/* Locked wrapper around __ib_get_agent_port(). */
static struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	entry = __ib_get_agent_port(device, port_num);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
	return entry;
}

/* Send a MAD response on the agent for the given QPN (0 = SMI, 1 = GSI),
 * building the address handle from the received work completion.  On a
 * switch, port 0 owns the agents but the outgoing port is patched into
 * the send WR. */
void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
			 struct ib_wc *wc, struct ib_device *device,
			 int port_num, int qpn)
{
	struct ib_agent_port_private *port_priv;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *send_buf;
	struct ib_ah *ah;
	struct ib_mad_send_wr_private *mad_send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		port_priv = ib_get_agent_port(device, 0);
	else
		port_priv = ib_get_agent_port(device, port_num);

	if (!port_priv) {
		printk(KERN_ERR SPFX "Unable to find port agent\n");
		return;
	}

	agent = port_priv->agent[qpn];
	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
	if (IS_ERR(ah)) {
		printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
			PTR_ERR(ah));
		return;
	}

	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_KERNEL);
	if (IS_ERR(send_buf)) {
		printk(KERN_ERR SPFX "ib_create_send_mad error\n");
		goto err1;
	}

	memcpy(send_buf->mad, mad, sizeof *mad);
	send_buf->ah = ah;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_send_wr->send_wr.wr.ud.port_num = port_num;
	}

	if (ib_post_send_mad(send_buf, NULL)) {
		printk(KERN_ERR SPFX "ib_post_send_mad error\n");
		goto err2;
	}
	return;
err2:
	ib_free_send_mad(send_buf);
err1:
	ib_destroy_ah(ah);
}

/* Send-completion callback: release the AH and send buffer created in
 * agent_send_response(). */
static void agent_send_handler(struct ib_mad_agent *mad_agent,
			       struct ib_mad_send_wc *mad_send_wc)
{
	ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

/* Register the send-only SMI (IB links only) and GSI MAD agents for a
 * port and add the entry to the global port list. */
int ib_agent_port_open(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;
	int ret;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
		ret = -ENOMEM;
		goto error1;
	}

	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
		/* Obtain send only MAD agent for SMI QP */
		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
							    IB_QPT_SMI, NULL, 0,
							    &agent_send_handler,
							    NULL, NULL);
		if (IS_ERR(port_priv->agent[0])) {
			ret = PTR_ERR(port_priv->agent[0]);
			goto error2;
		}
	}

	/* Obtain send only MAD agent for GSI QP */
	port_priv->agent[1] = ib_register_mad_agent(device, port_num,
						    IB_QPT_GSI, NULL, 0,
						    &agent_send_handler,
						    NULL, NULL);
	if (IS_ERR(port_priv->agent[1])) {
		ret = PTR_ERR(port_priv->agent[1]);
		goto error3;
	}

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return 0;

error3:
	if (port_priv->agent[0])
		ib_unregister_mad_agent(port_priv->agent[0]);
error2:
	kfree(port_priv);
error1:
	return ret;
}

/* Remove the port entry and unregister both MAD agents.  agent[0] may
 * be absent on non-IB link layers, hence the NULL check. */
int ib_agent_port_close(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	port_priv = __ib_get_agent_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	ib_unregister_mad_agent(port_priv->agent[1]);
	if (port_priv->agent[0])
		ib_unregister_mad_agent(port_priv->agent[0]);

	kfree(port_priv);
	return 0;
}
gpl-2.0
agx/linux-wpan-next
drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
46
6048
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * Author: Nathan Rutman <nathan.rutman@sun.com> * * Kernel <-> userspace communication routines. * Using pipes for all arches. */ #define DEBUG_SUBSYSTEM S_CLASS #define D_KUC D_OTHER #include "../../include/linux/libcfs/libcfs.h" /* This is the kernel side (liblustre as well). */ /** * libcfs_kkuc_msg_put - send an message from kernel to userspace * @param fp to send the message to * @param payload Payload data. 
First field of payload is always * struct kuc_hdr */ int libcfs_kkuc_msg_put(struct file *filp, void *payload) { struct kuc_hdr *kuch = (struct kuc_hdr *)payload; ssize_t count = kuch->kuc_msglen; loff_t offset = 0; mm_segment_t fs; int rc = -ENOSYS; if (filp == NULL || IS_ERR(filp)) return -EBADF; if (kuch->kuc_magic != KUC_MAGIC) { CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic); return -ENOSYS; } fs = get_fs(); set_fs(KERNEL_DS); while (count > 0) { rc = vfs_write(filp, (void __force __user *)payload, count, &offset); if (rc < 0) break; count -= rc; payload += rc; rc = 0; } set_fs(fs); if (rc < 0) CWARN("message send failed (%d)\n", rc); else CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp); return rc; } EXPORT_SYMBOL(libcfs_kkuc_msg_put); /* Broadcast groups are global across all mounted filesystems; * i.e. registering for a group on 1 fs will get messages for that * group from any fs */ /** A single group registration has a uid and a file pointer */ struct kkuc_reg { struct list_head kr_chain; int kr_uid; struct file *kr_fp; __u32 kr_data; }; static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {}; /* Protect message sending against remove and adds */ static DECLARE_RWSEM(kg_sem); /** Add a receiver to a broadcast group * @param filp pipe to write into * @param uid identifier for this receiver * @param group group number */ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, __u32 data) { struct kkuc_reg *reg; if (group > KUC_GRP_MAX) { CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); return -EINVAL; } /* fput in group_rem */ if (filp == NULL) return -EBADF; /* freed in group_rem */ reg = kmalloc(sizeof(*reg), 0); if (reg == NULL) return -ENOMEM; reg->kr_fp = filp; reg->kr_uid = uid; reg->kr_data = data; down_write(&kg_sem); if (kkuc_groups[group].next == NULL) INIT_LIST_HEAD(&kkuc_groups[group]); list_add(&reg->kr_chain, &kkuc_groups[group]); up_write(&kg_sem); CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, 
filp, group); return 0; } EXPORT_SYMBOL(libcfs_kkuc_group_add); int libcfs_kkuc_group_rem(int uid, int group) { struct kkuc_reg *reg, *next; if (kkuc_groups[group].next == NULL) return 0; if (uid == 0) { /* Broadcast a shutdown message */ struct kuc_hdr lh; lh.kuc_magic = KUC_MAGIC; lh.kuc_transport = KUC_TRANSPORT_GENERIC; lh.kuc_msgtype = KUC_MSG_SHUTDOWN; lh.kuc_msglen = sizeof(lh); libcfs_kkuc_group_put(group, &lh); } down_write(&kg_sem); list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { if ((uid == 0) || (uid == reg->kr_uid)) { list_del(&reg->kr_chain); CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", reg->kr_uid, reg->kr_fp, group); if (reg->kr_fp != NULL) fput(reg->kr_fp); kfree(reg); } } up_write(&kg_sem); return 0; } EXPORT_SYMBOL(libcfs_kkuc_group_rem); int libcfs_kkuc_group_put(int group, void *payload) { struct kkuc_reg *reg; int rc = 0; int one_success = 0; down_read(&kg_sem); list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { if (reg->kr_fp != NULL) { rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); if (rc == 0) one_success = 1; else if (rc == -EPIPE) { fput(reg->kr_fp); reg->kr_fp = NULL; } } } up_read(&kg_sem); /* don't return an error if the message has been delivered * at least to one agent */ if (one_success) rc = 0; return rc; } EXPORT_SYMBOL(libcfs_kkuc_group_put); /** * Calls a callback function for each link of the given kuc group. * @param group the group to call the function on. * @param cb_func the function to be called. * @param cb_arg iextra argument to be passed to the callback function. 
*/ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, void *cb_arg) { struct kkuc_reg *reg; int rc = 0; if (group > KUC_GRP_MAX) { CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); return -EINVAL; } /* no link for this group */ if (kkuc_groups[group].next == NULL) return 0; down_write(&kg_sem); list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { if (reg->kr_fp != NULL) rc = cb_func(reg->kr_data, cb_arg); } up_write(&kg_sem); return rc; } EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
gpl-2.0
jcadduono/nethunter_kernel_noblelte
drivers/video/exynos/decon_dual_display/hdmi_cec_ctrl.c
558
5367
/* linux/drivers/media/video/samsung/tvout/hw_if/cec.c
 *
 * Copyright (c) 2009 Samsung Electronics
 * http://www.samsung.com/
 *
 * cec ftn file for Samsung TVOUT driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/videodev2_exynos_media.h>
#include <linux/irqreturn.h>
#include <linux/stddef.h>
#include <linux/of_address.h>

#include "regs-cec.h"
#include "cec.h"

/* HDMI reference clock fed to the CEC block, in Hz */
#define S5P_HDMI_FIN			24000000
/* Target CEC source clock after the PMU pre-divider, in Hz */
#define CEC_DIV_RATIO			320000

/* Low nibble of byte 0 of a CEC frame holds the destination address */
#define CEC_MESSAGE_BROADCAST_MASK	0x0F
#define CEC_MESSAGE_BROADCAST		0x0F
#define CEC_FILTER_THRESHOLD		0x15

/* ioremapped CEC controller registers (set up in s5p_cec_mem_probe) */
void __iomem *cec_base;
/* ioremapped PMU register holding the CEC clock pre-divider */
void __iomem *pmu_regs;

struct cec_rx_struct cec_rx_struct;
struct cec_tx_struct cec_tx_struct;

/*
 * Program the two-stage CEC clock divider: the PMU pre-divider brings
 * the 24 MHz HDMI clock down to CEC_DIV_RATIO, and the controller's
 * internal DIVISOR registers derive the CEC bit clock from that.
 */
void s5p_cec_set_divider(void)
{
	u32 div_ratio, reg, div_val;

	/* PMU pre-divider: S5P_HDMI_FIN / (div_ratio + 1) == CEC_DIV_RATIO */
	div_ratio = S5P_HDMI_FIN / CEC_DIV_RATIO - 1;

	reg = readl(pmu_regs);
	reg = (reg & ~(0x3FF << 16)) | (div_ratio << 16);
	writel(reg, pmu_regs);

	/*
	 * Internal divisor == CEC_DIV_RATIO * 0.05 ms - 1.  The original
	 * code computed this as CEC_DIV_RATIO * 0.00005 - 1 with floating
	 * point, which is not allowed in kernel context (FPU state is not
	 * saved/restored).  1/20000 == 0.00005, so pure integer arithmetic
	 * produces the identical value (15).
	 */
	div_val = CEC_DIV_RATIO / 20000 - 1;

	writeb(0x0, cec_base + S5P_CES_DIVISOR_3);
	writeb(0x0, cec_base + S5P_CES_DIVISOR_2);
	writeb(0x0, cec_base + S5P_CES_DIVISOR_1);
	writeb(div_val, cec_base + S5P_CES_DIVISOR_0);
}

/* Enable reception by setting the RX enable bit in RX_CTRL. */
void s5p_cec_enable_rx(void)
{
	u8 reg;

	reg = readb(cec_base + S5P_CES_RX_CTRL);
	reg |= S5P_CES_RX_CTRL_ENABLE;
	writeb(reg, cec_base + S5P_CES_RX_CTRL);
}

/* Mask (disable) the RX-done and RX-error interrupts. */
void s5p_cec_mask_rx_interrupts(void)
{
	u8 reg;

	reg = readb(cec_base + S5P_CES_IRQ_MASK);
	reg |= S5P_CES_IRQ_RX_DONE;
	reg |= S5P_CES_IRQ_RX_ERROR;
	writeb(reg, cec_base + S5P_CES_IRQ_MASK);
}

/* Unmask (enable) the RX-done and RX-error interrupts. */
void s5p_cec_unmask_rx_interrupts(void)
{
	u8 reg;

	reg = readb(cec_base + S5P_CES_IRQ_MASK);
	reg &= ~S5P_CES_IRQ_RX_DONE;
	reg &= ~S5P_CES_IRQ_RX_ERROR;
	writeb(reg, cec_base + S5P_CES_IRQ_MASK);
}

/* Mask (disable) the TX-done and TX-error interrupts. */
void s5p_cec_mask_tx_interrupts(void)
{
	u8 reg;

	reg = readb(cec_base + S5P_CES_IRQ_MASK);
	reg |= S5P_CES_IRQ_TX_DONE;
	reg |= S5P_CES_IRQ_TX_ERROR;
	writeb(reg, cec_base + S5P_CES_IRQ_MASK);
}

/* Unmask (enable) the TX-done and TX-error interrupts. */
void s5p_cec_unmask_tx_interrupts(void)
{
	u8 reg;

	reg = readb(cec_base + S5P_CES_IRQ_MASK);
	reg &= ~S5P_CES_IRQ_TX_DONE;
	reg &= ~S5P_CES_IRQ_TX_ERROR;
	writeb(reg, cec_base + S5P_CES_IRQ_MASK);
}

/* Reset both the RX and TX logic of the CEC block. */
void s5p_cec_reset(void)
{
	u8 reg;

	writeb(S5P_CES_RX_CTRL_RESET, cec_base + S5P_CES_RX_CTRL);
	writeb(S5P_CES_TX_CTRL_RESET, cec_base + S5P_CES_TX_CTRL);

	/*
	 * NOTE(review): 0xc4 is an undocumented register offset in this
	 * file (no macro in regs-cec.h is used); bit 0 is cleared after a
	 * reset — confirm its meaning against the SoC TRM.
	 */
	reg = readb(cec_base + 0xc4);
	reg &= ~0x1;
	writeb(reg, cec_base + 0xc4);
}

/* Reset only the transmit logic. */
void s5p_cec_tx_reset(void)
{
	writeb(S5P_CES_TX_CTRL_RESET, cec_base + S5P_CES_TX_CTRL);
}

/* Reset only the receive logic (plus the undocumented 0xc4 bit). */
void s5p_cec_rx_reset(void)
{
	u8 reg;

	writeb(S5P_CES_RX_CTRL_RESET, cec_base + S5P_CES_RX_CTRL);

	reg = readb(cec_base + 0xc4);
	reg &= ~0x1;
	writeb(reg, cec_base + 0xc4);
}

/* Configure the RX glitch filter threshold; filter control left at 0. */
void s5p_cec_threshold(void)
{
	writeb(CEC_FILTER_THRESHOLD, cec_base + S5P_CES_RX_FILTER_TH);
	writeb(0, cec_base + S5P_CES_RX_FILTER_CTRL);
}

/* Atomically record the driver-side TX state machine state. */
void s5p_cec_set_tx_state(enum cec_state state)
{
	atomic_set(&cec_tx_struct.state, state);
}

/* Atomically record the driver-side RX state machine state. */
void s5p_cec_set_rx_state(enum cec_state state)
{
	atomic_set(&cec_rx_struct.state, state);
}

/*
 * Copy a CEC frame into the TX buffer registers and kick off
 * transmission.  Byte 0 selects broadcast vs directed addressing.
 * TX buffer registers are spaced 4 bytes apart, hence the i * 4.
 */
void s5p_cec_copy_packet(char *data, size_t count)
{
	int i = 0;
	u8 reg;

	while (i < count) {
		writeb(data[i], cec_base + (S5P_CES_TX_BUFF0 + (i * 4)));
		i++;
	}

	writeb(count, cec_base + S5P_CES_TX_BYTES);
	s5p_cec_set_tx_state(STATE_TX);

	reg = readb(cec_base + S5P_CES_TX_CTRL);
	reg |= S5P_CES_TX_CTRL_START;

	if ((data[0] & CEC_MESSAGE_BROADCAST_MASK) == CEC_MESSAGE_BROADCAST)
		reg |= S5P_CES_TX_CTRL_BCAST;
	else
		reg &= ~S5P_CES_TX_CTRL_BCAST;

	/*
	 * NOTE(review): 0x50 sets additional TX_CTRL bits (presumably the
	 * retransmission count field) — confirm against the SoC TRM.
	 */
	reg |= 0x50;
	writeb(reg, cec_base + S5P_CES_TX_CTRL);
}

/* Set the controller's CEC logical address (low nibble only). */
void s5p_cec_set_addr(u32 addr)
{
	writeb(addr & 0x0F, cec_base + S5P_CES_LOGIC_ADDR);
}

/* Assemble the four 8-bit STATUS registers into one 32-bit word. */
u32 s5p_cec_get_status(void)
{
	u32 status = 0;

	status = readb(cec_base + S5P_CES_STATUS_0);
	status |= readb(cec_base + S5P_CES_STATUS_1) << 8;
	status |= readb(cec_base + S5P_CES_STATUS_2) << 16;
	status |= readb(cec_base + S5P_CES_STATUS_3) << 24;

	tvout_dbg("status = 0x%x!\n", status);

	return status;
}

/* Acknowledge (clear) any pending TX interrupts. */
void s5p_clr_pending_tx(void)
{
	writeb(S5P_CES_IRQ_TX_DONE | S5P_CES_IRQ_TX_ERROR,
	       cec_base + S5P_CES_IRQ_CLEAR);
}

/* Acknowledge (clear) any pending RX interrupts. */
void s5p_clr_pending_rx(void)
{
	writeb(S5P_CES_IRQ_RX_DONE | S5P_CES_IRQ_RX_ERROR,
	       cec_base + S5P_CES_IRQ_CLEAR);
}

/*
 * Copy a received frame out of the RX buffer registers into @buffer.
 * RX buffer registers are spaced 4 bytes apart, like the TX side.
 */
void s5p_cec_get_rx_buf(u32 size, u8 *buffer)
{
	u32 i = 0;

	while (i < size) {
		buffer[i] = readb(cec_base + S5P_CES_RX_BUFF0 + (i * 4));
		i++;
	}
}

/*
 * Map the CEC register window and the PMU clock-control register from
 * the device tree.  Returns 0 on success or a negative errno.
 */
int s5p_cec_mem_probe(struct platform_device *pdev)
{
	struct device_node *hdmiphy_sys;
	struct resource *res;
	int ret = 0;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to get memory region resource for cec\n");
		return -ENXIO;
	}

	cec_base = devm_request_and_ioremap(&pdev->dev, res);
	if (cec_base == NULL) {
		dev_err(&pdev->dev,
			"failed to claim register region for hdmicec\n");
		return -ENOENT;
	}

	hdmiphy_sys = of_get_child_by_name(pdev->dev.of_node, "hdmiphy-sys");
	if (!hdmiphy_sys) {
		dev_err(&pdev->dev,
			"No sys-controller interface for hdmiphy\n");
		return -ENODEV;
	}

	pmu_regs = of_iomap(hdmiphy_sys, 0);
	if (pmu_regs == NULL) {
		dev_err(&pdev->dev,
			"Can't get hdmiphy pmu control register\n");
		return -ENOMEM;
	}

	return ret;
}
gpl-2.0
TrustZoneGenericDriver/linux-xlnx
arch/arm/mach-pxa/mioa701.c
1582
20631
/*
 * Handles the Mitac Mio A701 Board
 *
 * Copyright (C) 2008 Robert Jarzmik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/gpio_keys.h>
#include <linux/pwm_backlight.h>
#include <linux/rtc.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pda_power.h>
#include <linux/power_supply.h>
#include <linux/wm97xx.h>
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/reboot.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/max1586.h>
#include <linux/slab.h>
#include <linux/i2c/pxa-i2c.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/pxa27x.h>
#include <mach/regs-rtc.h>
#include <linux/platform_data/keypad-pxa27x.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <mach/udc.h>
#include <mach/pxa27x-udc.h>
#include <linux/platform_data/camera-pxa.h>
#include <mach/audio.h>
#include <mach/smemc.h>
#include <media/soc_camera.h>

#include <mach/mioa701.h>

#include "generic.h"
#include "devices.h"

/*
 * Static MFP (multi-function pin) routing for the whole board, applied
 * once at init time via pxa2xx_mfp_config().
 *
 * NOTE(review): GPIO12 appears both as GPIO12_CIF_DD_7 (camera data) and
 * as GPIO12_HPJACK_INSERT, and GPIO82 as both GPIO82_CIF_DD_5 and
 * GPIO82_LED_nVibra — presumably the later entry wins / the pins are
 * shared; confirm against the board schematics.
 */
static unsigned long mioa701_pin_config[] = {
	/* Mio global */
	MIO_CFG_OUT(GPIO9_CHARGE_EN, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO18_POWEROFF, AF0, DRIVE_LOW),
	MFP_CFG_OUT(GPIO3, AF0, DRIVE_HIGH),
	MFP_CFG_OUT(GPIO4, AF0, DRIVE_HIGH),
	MIO_CFG_IN(GPIO80_MAYBE_CHARGE_VDROP, AF0),

	/* Backlight PWM 0 */
	GPIO16_PWM0_OUT,

	/* MMC */
	GPIO32_MMC_CLK,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,
	GPIO112_MMC_CMD,
	MIO_CFG_IN(GPIO78_SDIO_RO, AF0),
	MIO_CFG_IN(GPIO15_SDIO_INSERT, AF0),
	MIO_CFG_OUT(GPIO91_SDIO_EN, AF0, DRIVE_LOW),

	/* USB */
	MIO_CFG_IN(GPIO13_nUSB_DETECT, AF0),
	MIO_CFG_OUT(GPIO22_USB_ENABLE, AF0, DRIVE_LOW),

	/* LCD */
	GPIOxx_LCD_TFT_16BPP,

	/* QCI (quick capture camera interface) */
	GPIO12_CIF_DD_7,
	GPIO17_CIF_DD_6,
	GPIO50_CIF_DD_3,
	GPIO51_CIF_DD_2,
	GPIO52_CIF_DD_4,
	GPIO53_CIF_MCLK,
	GPIO54_CIF_PCLK,
	GPIO55_CIF_DD_1,
	GPIO81_CIF_DD_0,
	GPIO82_CIF_DD_5,
	GPIO84_CIF_FV,
	GPIO85_CIF_LV,
	MIO_CFG_OUT(GPIO56_MT9M111_nOE, AF0, DRIVE_LOW),

	/* Bluetooth */
	MIO_CFG_IN(GPIO14_BT_nACTIVITY, AF0),
	GPIO44_BTUART_CTS,
	GPIO42_BTUART_RXD,
	GPIO45_BTUART_RTS,
	GPIO43_BTUART_TXD,
	MIO_CFG_OUT(GPIO83_BT_ON, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO77_BT_UNKNOWN1, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO86_BT_MAYBE_nRESET, AF0, DRIVE_HIGH),

	/* GPS */
	MIO_CFG_OUT(GPIO23_GPS_UNKNOWN1, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO26_GPS_ON, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO27_GPS_RESET, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO106_GPS_UNKNOWN2, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO107_GPS_UNKNOWN3, AF0, DRIVE_LOW),
	GPIO46_STUART_RXD,
	GPIO47_STUART_TXD,

	/* GSM */
	MIO_CFG_OUT(GPIO24_GSM_MOD_RESET_CMD, AF0, DRIVE_LOW),
	MIO_CFG_OUT(GPIO88_GSM_nMOD_ON_CMD, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO90_GSM_nMOD_OFF_CMD, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO114_GSM_nMOD_DTE_UART_STATE, AF0, DRIVE_HIGH),
	MIO_CFG_IN(GPIO25_GSM_MOD_ON_STATE, AF0),
	MIO_CFG_IN(GPIO113_GSM_EVENT, AF0) | WAKEUP_ON_EDGE_BOTH,
	GPIO34_FFUART_RXD,
	GPIO35_FFUART_CTS,
	GPIO36_FFUART_DCD,
	GPIO37_FFUART_DSR,
	GPIO39_FFUART_TXD,
	GPIO40_FFUART_DTR,
	GPIO41_FFUART_RTS,

	/* Sound */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	GPIO89_AC97_SYSCLK,
	MIO_CFG_IN(GPIO12_HPJACK_INSERT, AF0),

	/* Leds */
	MIO_CFG_OUT(GPIO10_LED_nCharging, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO97_LED_nBlue, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO98_LED_nOrange, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO82_LED_nVibra, AF0, DRIVE_HIGH),
	MIO_CFG_OUT(GPIO115_LED_nKeyboard, AF0, DRIVE_HIGH),

	/* Keyboard */
	MIO_CFG_IN(GPIO0_KEY_POWER, AF0) | WAKEUP_ON_EDGE_BOTH,
	MIO_CFG_IN(GPIO93_KEY_VOLUME_UP, AF0),
	MIO_CFG_IN(GPIO94_KEY_VOLUME_DOWN, AF0),
	GPIO100_KP_MKIN_0,
	GPIO101_KP_MKIN_1,
	GPIO102_KP_MKIN_2,
	GPIO103_KP_MKOUT_0,
	GPIO104_KP_MKOUT_1,
	GPIO105_KP_MKOUT_2,

	/* I2C */
	GPIO117_I2C_SCL,
	GPIO118_I2C_SDA,

	/* Unknown */
	MFP_CFG_IN(GPIO20, AF0),
	MFP_CFG_IN(GPIO21, AF0),
	MFP_CFG_IN(GPIO33, AF0),
	MFP_CFG_OUT(GPIO49, AF0, DRIVE_HIGH),
	MFP_CFG_OUT(GPIO57, AF0, DRIVE_HIGH),
	MFP_CFG_IN(GPIO96, AF0),
	MFP_CFG_OUT(GPIO116, AF0, DRIVE_HIGH),
};

/* LCD Screen and Backlight */

static struct platform_pwm_backlight_data mioa701_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 100,
	.dft_brightness	= 50,
	.pwm_period_ns	= 4000 * 1024,	/* Fl = 250kHz */
	.enable_gpio	= -1,
};

/*
 * LTM0305A776C LCD panel timings
 *
 * see:
 *  - the LTM0305A776C datasheet,
 *  - and the PXA27x Programmers' manual
 */
static struct pxafb_mode_info mioa701_ltm0305a776c = {
	.pixclock		= 220000,	/* CLK=4.545 MHz */
	.xres			= 240,
	.yres			= 320,
	.bpp			= 16,
	.hsync_len		= 4,
	.vsync_len		= 2,
	.left_margin		= 6,
	.right_margin		= 4,
	.upper_margin		= 5,
	.lower_margin		= 3,
};

/* Panel power rail is controlled by a plain GPIO. */
static void mioa701_lcd_power(int on, struct fb_var_screeninfo *si)
{
	gpio_set_value(GPIO87_LCD_POWER, on);
}

static struct pxafb_mach_info mioa701_pxafb_info = {
	.modes			= &mioa701_ltm0305a776c,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
	.pxafb_lcd_power	= mioa701_lcd_power,
};

/*
 * Keyboard configuration: 3x3 matrix on the PXA27x keypad controller.
 */
static const unsigned int mioa701_matrix_keys[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_RIGHT),
	KEY(0, 2, KEY_MEDIA),
	KEY(1, 0, KEY_DOWN),
	KEY(1, 1, KEY_ENTER),
	KEY(1, 2, KEY_CONNECT),	/* GPS key */
	KEY(2, 0, KEY_LEFT),
	KEY(2, 1, KEY_PHONE),	/* Phone Green key */
	KEY(2, 2, KEY_CAMERA)	/* Camera key */
};

static struct matrix_keymap_data mioa701_matrix_keymap_data = {
	.keymap		= mioa701_matrix_keys,
	.keymap_size	= ARRAY_SIZE(mioa701_matrix_keys),
};

static struct pxa27x_keypad_platform_data mioa701_keypad_info = {
	.matrix_key_rows	= 3,
	.matrix_key_cols	= 3,
	.matrix_keymap_data	= &mioa701_matrix_keymap_data,
};

/*
 * GPIO Key Configuration: direct (non-matrix) buttons.
 */
#define MIO_KEY(key, _gpio, _desc, _wakeup) \
	{ .code = (key), .gpio = (_gpio), .active_low = 0, \
	  .desc = (_desc), .type = EV_KEY, .wakeup = (_wakeup) }

static struct gpio_keys_button mioa701_button_table[] = {
	MIO_KEY(KEY_EXIT, GPIO0_KEY_POWER, "Power button", 1),
	MIO_KEY(KEY_VOLUMEUP, GPIO93_KEY_VOLUME_UP, "Volume up", 0),
	MIO_KEY(KEY_VOLUMEDOWN, GPIO94_KEY_VOLUME_DOWN, "Volume down", 0),
	MIO_KEY(KEY_HP, GPIO12_HPJACK_INSERT, "HP jack detect", 0)
};

static struct gpio_keys_platform_data mioa701_gpio_keys_data = {
	.buttons	= mioa701_button_table,
	.nbuttons	= ARRAY_SIZE(mioa701_button_table),
};

/*
 * Leds and vibrator: all active-low GPIO LEDs, including the vibrator
 * motor which is modeled as an LED.
 */
#define ONE_LED(_gpio, _name) \
	{ .gpio = (_gpio), .name = (_name), .active_low = true }

static struct gpio_led gpio_leds[] = {
	ONE_LED(GPIO10_LED_nCharging, "mioa701:charging"),
	ONE_LED(GPIO97_LED_nBlue, "mioa701:blue"),
	ONE_LED(GPIO98_LED_nOrange, "mioa701:orange"),
	ONE_LED(GPIO82_LED_nVibra, "mioa701:vibra"),
	ONE_LED(GPIO115_LED_nKeyboard, "mioa701:keyboard")
};

static struct gpio_led_platform_data gpio_led_info = {
	.leds		= gpio_leds,
	.num_leds	= ARRAY_SIZE(gpio_leds),
};

/*
 * GSM Sagem XS200 chip
 *
 * GSM handling was purged from kernel. For history, this is the way to go :
 *   - init : GPIO24_GSM_MOD_RESET_CMD = 0, GPIO114_GSM_nMOD_DTE_UART_STATE = 1
 *            GPIO88_GSM_nMOD_ON_CMD = 1, GPIO90_GSM_nMOD_OFF_CMD = 1
 *   - reset : GPIO24_GSM_MOD_RESET_CMD = 1, msleep(100),
 *             GPIO24_GSM_MOD_RESET_CMD = 0
 *   - turn on : GPIO88_GSM_nMOD_ON_CMD = 0, msleep(1000),
 *               GPIO88_GSM_nMOD_ON_CMD = 1
 *   - turn off : GPIO90_GSM_nMOD_OFF_CMD = 0, msleep(1000),
 *                GPIO90_GSM_nMOD_OFF_CMD = 1
 */

/* Read the modem's power-state pin; returns 1 when the modem is on. */
static int is_gsm_on(void)
{
	int is_on;

	is_on = !!gpio_get_value(GPIO25_GSM_MOD_ON_STATE);
	return is_on;
}

/* Logs modem on/off transitions; no further action is taken. */
irqreturn_t gsm_on_irq(int irq, void *p)
{
	printk(KERN_DEBUG "Mioa701: GSM status changed to %s\n",
	       is_gsm_on() ? "on" : "off");
	return IRQ_HANDLED;
}

static struct gpio gsm_gpios[] = {
	{ GPIO25_GSM_MOD_ON_STATE, GPIOF_IN, "GSM state" },
	{ GPIO113_GSM_EVENT, GPIOF_IN, "GSM event" },
};

/*
 * Claim the modem status GPIOs, watch the power-state pin for edges,
 * and arm the GSM event pin as a wakeup source.
 */
static int __init gsm_init(void)
{
	int rc;

	rc = gpio_request_array(ARRAY_AND_SIZE(gsm_gpios));
	if (rc)
		goto err_gpio;
	rc = request_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), gsm_on_irq,
			 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			 "GSM XS200 Power Irq", NULL);
	if (rc)
		goto err_irq;

	gpio_set_wake(GPIO113_GSM_EVENT, 1);
	return 0;

err_irq:
	printk(KERN_ERR "Mioa701: Can't request GSM_ON irq\n");
	gpio_free_array(ARRAY_AND_SIZE(gsm_gpios));
err_gpio:
	printk(KERN_ERR "Mioa701: gsm not available\n");
	return rc;
}

/* Undo gsm_init() in reverse order. */
static void gsm_exit(void)
{
	free_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), NULL);
	gpio_free_array(ARRAY_AND_SIZE(gsm_gpios));
}

/*
 * Bluetooth BRF6150 chip
 *
 * BT handling was purged from kernel. For history, this is the way to go :
 * - turn on : GPIO83_BT_ON = 1
 * - turn off : GPIO83_BT_ON = 0
 */

/*
 * GPS Sirf Star III chip
 *
 * GPS handling was purged from kernel. For history, this is the way to go :
 * - init : GPIO23_GPS_UNKNOWN1 = 1, GPIO26_GPS_ON = 0, GPIO27_GPS_RESET = 0
 *          GPIO106_GPS_UNKNOWN2 = 0, GPIO107_GPS_UNKNOWN3 = 0
 * - turn on : GPIO27_GPS_RESET = 1, GPIO26_GPS_ON = 1
 * - turn off : GPIO26_GPS_ON = 0, GPIO27_GPS_RESET = 0
 */

/*
 * USB UDC
 */

/* VBUS detect pin is active low. */
static int is_usb_connected(void)
{
	return !gpio_get_value(GPIO13_nUSB_DETECT);
}

static struct pxa2xx_udc_mach_info mioa701_udc_info = {
	.udc_is_connected = is_usb_connected,
	.gpio_pullup	  = GPIO22_USB_ENABLE,
};

struct gpio_vbus_mach_info gpio_vbus_data = {
	.gpio_vbus		= GPIO13_nUSB_DETECT,
	.gpio_vbus_inverted	= 1,
	.gpio_pullup		= -1,
};

/*
 * SDIO/MMC Card controller
 */
/**
 * The card detect interrupt isn't debounced so we delay it by 250ms
 * to give the card a chance to fully insert/eject.
 */
static struct pxamci_platform_data mioa701_mci_info = {
	.detect_delay_ms	= 250,
	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_card_detect	= GPIO15_SDIO_INSERT,
	.gpio_card_ro		= GPIO78_SDIO_RO,
	.gpio_power		= GPIO91_SDIO_EN,
};

/* FlashRAM: DiskOnChip G3 on static memory bank 0. */
static struct resource docg3_resource = {
	.start	= PXA_CS0_PHYS,
	.end	= PXA_CS0_PHYS + SZ_8K - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device docg3 = {
	.name		= "docg3",
	.id		= -1,
	.resource	= &docg3_resource,
	.num_resources	= 1,
	.dev = {
		.platform_data	= NULL,
	},
};

/*
 * Suspend/Resume bootstrap management
 *
 * MIO A701 reboot sequence is highly ROM dependent. From the one
 * dissassembled, this sequence is as follows :
 *   - disables interrupts
 *   - initialize SDRAM (self refresh RAM into active RAM)
 *   - initialize GPIOs (depends on value at 0xa020b020)
 *   - initialize coprossessors
 *   - if edge detect on PWR_SCL(GPIO3), then proceed to cold start
 *   - or if value at 0xa020b000 not equal to 0x0f0f0f0f, proceed to cold start
 *   - else do a resume, ie. jump to addr 0xa0100000
 */
#define RESUME_ENABLE_ADDR	0xa020b000
#define RESUME_ENABLE_VAL	0x0f0f0f0f
#define RESUME_BT_ADDR		0xa020b020
#define RESUME_UNKNOWN_ADDR	0xa020b024
#define RESUME_VECTOR_ADDR	0xa0100000
#define BOOTSTRAP_WORDS		mioa701_bootstrap_lg/4

/* Holds the RAM words clobbered by the resume bootstrap (see above). */
static u32 *save_buffer;

/* Copy the resume bootstrap code to the address the ROM jumps to. */
static void install_bootstrap(void)
{
	int i;
	u32 *rom_bootstrap = phys_to_virt(RESUME_VECTOR_ADDR);
	u32 *src = &mioa701_bootstrap;

	for (i = 0; i < BOOTSTRAP_WORDS; i++)
		rom_bootstrap[i] = src[i];
}

/*
 * Syscore suspend hook: save the RAM the ROM inspects on wakeup, then
 * write the magic values and install the bootstrap so the ROM performs
 * a resume instead of a cold start.
 */
static int mioa701_sys_suspend(void)
{
	int i = 0, is_bt_on;
	u32 *mem_resume_vector	= phys_to_virt(RESUME_VECTOR_ADDR);
	u32 *mem_resume_enabler = phys_to_virt(RESUME_ENABLE_ADDR);
	u32 *mem_resume_bt	= phys_to_virt(RESUME_BT_ADDR);
	u32 *mem_resume_unknown	= phys_to_virt(RESUME_UNKNOWN_ADDR);

	/* Devices prepare suspend */
	is_bt_on = !!gpio_get_value(GPIO83_BT_ON);
	pxa2xx_mfp_set_lpm(GPIO83_BT_ON,
			   is_bt_on ? MFP_LPM_DRIVE_HIGH : MFP_LPM_DRIVE_LOW);

	for (i = 0; i < BOOTSTRAP_WORDS; i++)
		save_buffer[i] = mem_resume_vector[i];
	save_buffer[i++] = *mem_resume_enabler;
	save_buffer[i++] = *mem_resume_bt;
	save_buffer[i++] = *mem_resume_unknown;

	*mem_resume_enabler = RESUME_ENABLE_VAL;
	*mem_resume_bt	    = is_bt_on;

	install_bootstrap();
	return 0;
}

/* Syscore resume hook: restore the RAM saved in mioa701_sys_suspend(). */
static void mioa701_sys_resume(void)
{
	int i = 0;
	u32 *mem_resume_vector	= phys_to_virt(RESUME_VECTOR_ADDR);
	u32 *mem_resume_enabler = phys_to_virt(RESUME_ENABLE_ADDR);
	u32 *mem_resume_bt	= phys_to_virt(RESUME_BT_ADDR);
	u32 *mem_resume_unknown	= phys_to_virt(RESUME_UNKNOWN_ADDR);

	for (i = 0; i < BOOTSTRAP_WORDS; i++)
		mem_resume_vector[i] = save_buffer[i];
	*mem_resume_enabler = save_buffer[i++];
	*mem_resume_bt	    = save_buffer[i++];
	*mem_resume_unknown = save_buffer[i++];
}

static struct syscore_ops mioa701_syscore_ops = {
	.suspend	= mioa701_sys_suspend,
	.resume		= mioa701_sys_resume,
};

/* Register the syscore ops and allocate the bootstrap save area. */
static int __init bootstrap_init(void)
{
	int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3);

	register_syscore_ops(&mioa701_syscore_ops);
	save_buffer = kmalloc(save_size, GFP_KERNEL);
	if (!save_buffer)
		return -ENOMEM;
	printk(KERN_INFO "MioA701: allocated %d bytes for bootstrap\n",
	       save_size);
	return 0;
}

static void bootstrap_exit(void)
{
	kfree(save_buffer);
	unregister_syscore_ops(&mioa701_syscore_ops);

	/*
	 * NOTE(review): the two concatenated string literals below are
	 * missing a space between "next" and "resume".
	 */
	printk(KERN_CRIT "Unregistering mioa701 suspend will hang next"
	       "resume !!!\n");
}

/*
 * Power Supply
 */
static char *supplicants[] = {
	"mioa701_battery"
};

static int is_ac_connected(void)
{
	return gpio_get_value(GPIO96_AC_DETECT);
}

/* Charger is enabled only while charging from USB. */
static void mioa701_set_charge(int flags)
{
	gpio_set_value(GPIO9_CHARGE_EN, (flags == PDA_POWER_CHARGE_USB));
}

static struct pda_power_pdata power_pdata = {
	.is_ac_online	= is_ac_connected,
	.is_usb_online	= is_usb_connected,
	.set_charge	= mioa701_set_charge,
	.supplied_to	= supplicants,
	.num_supplicants = ARRAY_SIZE(supplicants),
};

static struct resource power_resources[] = {
	[0] = {
		.name	= "ac",
		.start	= PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT),
		.end	= PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT),
		.flags	= IORESOURCE_IRQ |
			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
	},
	[1] = {
		.name	= "usb",
		.start	= PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT),
		.end	= PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT),
		.flags	= IORESOURCE_IRQ |
			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct platform_device power_dev = {
	.name		= "pda-power",
	.id		= -1,
	.resource	= power_resources,
	.num_resources	= ARRAY_SIZE(power_resources),
	.dev = {
		.platform_data	= &power_pdata,
	},
};

static struct wm97xx_batt_pdata mioa701_battery_data = {
	.batt_aux	= WM97XX_AUX_ID1,
	.temp_aux	= -1,
	.charge_gpio	= -1,
	.min_voltage	= 0xc00,
	.max_voltage	= 0xfc0,
	.batt_tech	= POWER_SUPPLY_TECHNOLOGY_LION,
	.batt_div	= 1,
	.batt_mult	= 1,
	.batt_name	= "mioa701_battery",
};

static struct wm97xx_pdata mioa701_wm97xx_pdata = {
	.batt_pdata	= &mioa701_battery_data,
};

/*
 * Voltage regulation: the MAX1586 V3 rail supplies the CPU core.
 */
static struct regulator_consumer_supply max1586_consumers[] = {
	REGULATOR_SUPPLY("vcc_core", NULL),
};

static struct regulator_init_data max1586_v3_info = {
	.constraints = {
		.name		= "vcc_core range",
		.min_uV		= 1000000,
		.max_uV		= 1705000,
		.always_on	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies	= ARRAY_SIZE(max1586_consumers),
	.consumer_supplies	= max1586_consumers,
};

static struct max1586_subdev_data max1586_subdevs[] = {
	{ .name = "vcc_core", .id = MAX1586_V3,
	  .platform_data = &max1586_v3_info },
};

static struct max1586_platform_data max1586_info = {
	.subdevs	= max1586_subdevs,
	.num_subdevs	= ARRAY_SIZE(max1586_subdevs),
	.v3_gain	= MAX1586_GAIN_NO_R24, /* 700..1475 mV */
};

/*
 * Camera interface
 */
struct pxacamera_platform_data mioa701_pxacamera_platform_data = {
	.flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
	.mclk_10khz = 5000,
};

/* Power-I2C bus devices (bus 1): the MAX1586 core regulator. */
static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
	{
		I2C_BOARD_INFO("max1586", 0x14),
		.platform_data = &max1586_info,
	},
};

/* Board I2C devices. */
static struct i2c_board_info mioa701_i2c_devices[] = {
	{
		I2C_BOARD_INFO("mt9m111", 0x5d),
	},
};

static struct soc_camera_link iclink = {
	.bus_id		= 0, /* Match id in pxa27x_device_camera in device.c */
	.board_info	= &mioa701_i2c_devices[0],
	.i2c_adapter_id	= 0,
};

struct i2c_pxa_platform_data i2c_pdata = {
	.fast_mode = 1,
};

static pxa2xx_audio_ops_t mioa701_ac97_info = {
	.reset_gpio	= 95,
	.codec_pdata	= { &mioa701_wm97xx_pdata, },
};

/*
 * Mio global
 */

/* Devices: helper macros for boilerplate platform_device definitions. */
#define MIO_PARENT_DEV(var, strname, tparent, pdata)	\
static struct platform_device var = {			\
	.name		= strname,			\
	.id		= -1,				\
	.dev		= {				\
		.platform_data	= pdata,		\
		.parent		= tparent,		\
	},						\
};
#define MIO_SIMPLE_DEV(var, strname, pdata)	\
	MIO_PARENT_DEV(var, strname, NULL, pdata)

MIO_SIMPLE_DEV(mioa701_gpio_keys, "gpio-keys", &mioa701_gpio_keys_data)
MIO_PARENT_DEV(mioa701_backlight, "pwm-backlight",
	       &pxa27x_device_pwm0.dev, &mioa701_backlight_data);
MIO_SIMPLE_DEV(mioa701_led, "leds-gpio", &gpio_led_info)
MIO_SIMPLE_DEV(pxa2xx_pcm, "pxa2xx-pcm", NULL)
MIO_SIMPLE_DEV(mioa701_sound, "mioa701-wm9713", NULL)
MIO_SIMPLE_DEV(mioa701_board, "mioa701-board", NULL)
MIO_SIMPLE_DEV(gpio_vbus, "gpio-vbus", &gpio_vbus_data);
MIO_SIMPLE_DEV(mioa701_camera, "soc-camera-pdrv", &iclink);

/* All board platform devices, registered in one go at machine init. */
static struct platform_device *devices[] __initdata = {
	&mioa701_gpio_keys,
	&mioa701_backlight,
	&mioa701_led,
	&pxa2xx_pcm,
	&mioa701_sound,
	&power_dev,
	&docg3,
	&gpio_vbus,
	&mioa701_camera,
	&mioa701_board,
};

static void mioa701_machine_exit(void);

/*
 * Both poweroff and restart go through a soft reboot; the ROM decides
 * cold start vs resume from the RAM state (see the bootstrap comment).
 */
static void mioa701_poweroff(void)
{
	mioa701_machine_exit();
	pxa_restart(REBOOT_SOFT, NULL);
}

static void mioa701_restart(enum reboot_mode c, const char *cmd)
{
	mioa701_machine_exit();
	pxa_restart(REBOOT_SOFT, cmd);
}

/* GPIOs held for the whole lifetime of the machine. */
static struct gpio global_gpios[] = {
	{ GPIO9_CHARGE_EN, GPIOF_OUT_INIT_HIGH, "Charger enable" },
	{ GPIO18_POWEROFF, GPIOF_OUT_INIT_LOW, "Power Off" },
	{ GPIO87_LCD_POWER, GPIOF_OUT_INIT_LOW, "LCD Power" },
	{ GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" },
};

static struct regulator_consumer_supply fixed_5v0_consumers[] = {
	REGULATOR_SUPPLY("power", "pwm-backlight"),
};

/*
 * Machine init: program power-manager and static-memory registers,
 * apply the MFP table, then register every on-board device.
 */
static void __init mioa701_machine_init(void)
{
	int rc;

	PSLR = 0xff100000; /* SYSDEL=125ms, PWRDEL=125ms, PSLR_SL_ROD=1 */
	PCFR = PCFR_DC_EN | PCFR_GPR_EN | PCFR_OPDE;
	RTTR = 32768 - 1; /* Reset crazy WinCE value */
	UP2OCR = UP2OCR_HXOE;

	/*
	 * Set up the flash memory : DiskOnChip G3 on first static memory bank
	 */
	__raw_writel(0x7ff02dd8, MSC0);
	__raw_writel(0x0001c391, MCMEM0);
	__raw_writel(0x0001c391, MCATT0);
	__raw_writel(0x0001c391, MCIO0);

	pxa2xx_mfp_config(ARRAY_AND_SIZE(mioa701_pin_config));
	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	rc = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
	if (rc)
		pr_err("MioA701: Failed to request GPIOs: %d", rc);
	bootstrap_init();
	pxa_set_fb_info(NULL, &mioa701_pxafb_info);
	pxa_set_mci_info(&mioa701_mci_info);
	pxa_set_keypad_info(&mioa701_keypad_info);
	pxa_set_udc_info(&mioa701_udc_info);
	pxa_set_ac97_info(&mioa701_ac97_info);
	pm_power_off = mioa701_poweroff;
	platform_add_devices(devices, ARRAY_SIZE(devices));
	gsm_init();

	/* Power I2C devices go on bus 1; camera sensor on bus 0. */
	i2c_register_board_info(1, ARRAY_AND_SIZE(mioa701_pi2c_devices));
	pxa_set_i2c_info(&i2c_pdata);
	pxa27x_set_i2c_power_info(NULL);
	pxa_set_camera_info(&mioa701_pxacamera_platform_data);

	regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers,
				     ARRAY_SIZE(fixed_5v0_consumers),
				     5000000);
}

static void mioa701_machine_exit(void)
{
	bootstrap_exit();
	gsm_exit();
}

MACHINE_START(MIOA701, "MIO A701")
	.atag_offset	= 0x100,
	.map_io		= &pxa27x_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= &pxa27x_init_irq,
	.handle_irq	= &pxa27x_handle_irq,
	.init_machine	= mioa701_machine_init,
	.init_time	= pxa_timer_init,
	.restart	= mioa701_restart,
MACHINE_END
gpl-2.0
PikkonX/T989_ICS_KERNEL-
drivers/net/wireless/iwlegacy/iwl-devtrace.c
2606
1826
/****************************************************************************** * * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/module.h> /* sparse doesn't like tracepoint macros */ #ifndef __CHECKER__ #include "iwl-dev.h" #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event); #endif
gpl-2.0
DirtyUnicorns/android_kernel_lge_gee
security/selinux/avc.c
2606
23191
/* * Implementation of the kernel access vector cache (AVC). * * Authors: Stephen Smalley, <sds@epoch.ncsc.mil> * James Morris <jmorris@redhat.com> * * Update: KaiGai, Kohei <kaigai@ak.jp.nec.com> * Replaced the avc_lock spinlock by RCU. * * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/dcache.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/percpu.h> #include <net/sock.h> #include <linux/un.h> #include <net/af_unix.h> #include <linux/ip.h> #include <linux/audit.h> #include <linux/ipv6.h> #include <net/ipv6.h> #include "avc.h" #include "avc_ss.h" #include "classmap.h" #define AVC_CACHE_SLOTS 512 #define AVC_DEF_CACHE_THRESHOLD 512 #define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS #define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) #else #define avc_cache_stats_incr(field) do {} while (0) #endif struct avc_entry { u32 ssid; u32 tsid; u16 tclass; struct av_decision avd; }; struct avc_node { struct avc_entry ae; struct hlist_node list; /* anchored in avc_cache->slots[i] */ struct rcu_head rhead; }; struct avc_cache { struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ atomic_t lru_hint; /* LRU hint for reclaim scan */ atomic_t active_nodes; u32 latest_notif; /* latest revocation notification */ }; struct avc_callback_node { int (*callback) (u32 event, u32 ssid, u32 tsid, u16 tclass, u32 perms, u32 *out_retained); u32 events; u32 ssid; u32 tsid; u16 tclass; u32 perms; struct avc_callback_node *next; }; /* Exported via selinufs */ unsigned int avc_cache_threshold = 
AVC_DEF_CACHE_THRESHOLD; #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 }; #endif static struct avc_cache avc_cache; static struct avc_callback_node *avc_callbacks; static struct kmem_cache *avc_node_cachep; static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) { return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); } /** * avc_dump_av - Display an access vector in human-readable form. * @tclass: target security class * @av: access vector */ static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) { const char **perms; int i, perm; if (av == 0) { audit_log_format(ab, " null"); return; } perms = secclass_map[tclass-1].perms; audit_log_format(ab, " {"); i = 0; perm = 1; while (i < (sizeof(av) * 8)) { if ((perm & av) && perms[i]) { audit_log_format(ab, " %s", perms[i]); av &= ~perm; } i++; perm <<= 1; } if (av) audit_log_format(ab, " 0x%x", av); audit_log_format(ab, " }"); } /** * avc_dump_query - Display a SID pair and a class in human-readable form. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class */ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass) { int rc; char *scontext; u32 scontext_len; rc = security_sid_to_context(ssid, &scontext, &scontext_len); if (rc) audit_log_format(ab, "ssid=%d", ssid); else { audit_log_format(ab, "scontext=%s", scontext); kfree(scontext); } rc = security_sid_to_context(tsid, &scontext, &scontext_len); if (rc) audit_log_format(ab, " tsid=%d", tsid); else { audit_log_format(ab, " tcontext=%s", scontext); kfree(scontext); } BUG_ON(tclass >= ARRAY_SIZE(secclass_map)); audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); } /** * avc_init - Initialize the AVC. * * Initialize the access vector cache. 
*/ void __init avc_init(void) { int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { INIT_HLIST_HEAD(&avc_cache.slots[i]); spin_lock_init(&avc_cache.slots_lock[i]); } atomic_set(&avc_cache.active_nodes, 0); atomic_set(&avc_cache.lru_hint, 0); avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 0, SLAB_PANIC, NULL); audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); } int avc_get_hash_stats(char *page) { int i, chain_len, max_chain_len, slots_used; struct avc_node *node; struct hlist_head *head; rcu_read_lock(); slots_used = 0; max_chain_len = 0; for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; if (!hlist_empty(head)) { struct hlist_node *next; slots_used++; chain_len = 0; hlist_for_each_entry_rcu(node, next, head, list) chain_len++; if (chain_len > max_chain_len) max_chain_len = chain_len; } } rcu_read_unlock(); return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n" "longest chain: %d\n", atomic_read(&avc_cache.active_nodes), slots_used, AVC_CACHE_SLOTS, max_chain_len); } static void avc_node_free(struct rcu_head *rhead) { struct avc_node *node = container_of(rhead, struct avc_node, rhead); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); } static void avc_node_delete(struct avc_node *node) { hlist_del_rcu(&node->list); call_rcu(&node->rhead, avc_node_free); atomic_dec(&avc_cache.active_nodes); } static void avc_node_kill(struct avc_node *node) { kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); atomic_dec(&avc_cache.active_nodes); } static void avc_node_replace(struct avc_node *new, struct avc_node *old) { hlist_replace_rcu(&old->list, &new->list); call_rcu(&old->rhead, avc_node_free); atomic_dec(&avc_cache.active_nodes); } static inline int avc_reclaim_node(void) { struct avc_node *node; int hvalue, try, ecx; unsigned long flags; struct hlist_head *head; struct hlist_node *next; spinlock_t *lock; for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; 
try++) { hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; if (!spin_trylock_irqsave(lock, flags)) continue; rcu_read_lock(); hlist_for_each_entry(node, next, head, list) { avc_node_delete(node); avc_cache_stats_incr(reclaims); ecx++; if (ecx >= AVC_CACHE_RECLAIM) { rcu_read_unlock(); spin_unlock_irqrestore(lock, flags); goto out; } } rcu_read_unlock(); spin_unlock_irqrestore(lock, flags); } out: return ecx; } static struct avc_node *avc_alloc_node(void) { struct avc_node *node; node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC); if (!node) goto out; INIT_HLIST_NODE(&node->list); avc_cache_stats_incr(allocations); if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold) avc_reclaim_node(); out: return node; } static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { node->ae.ssid = ssid; node->ae.tsid = tsid; node->ae.tclass = tclass; memcpy(&node->ae.avd, avd, sizeof(node->ae.avd)); } static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node, *ret = NULL; int hvalue; struct hlist_head *head; struct hlist_node *next; hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; hlist_for_each_entry_rcu(node, next, head, list) { if (ssid == node->ae.ssid && tclass == node->ae.tclass && tsid == node->ae.tsid) { ret = node; break; } } return ret; } /** * avc_lookup - Look up an AVC entry. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * * Look up an AVC entry that is valid for the * (@ssid, @tsid), interpreting the permissions * based on @tclass. If a valid AVC entry exists, * then this function returns the avc_node. * Otherwise, this function returns NULL. 
*/ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node; avc_cache_stats_incr(lookups); node = avc_search_node(ssid, tsid, tclass); if (node) return node; avc_cache_stats_incr(misses); return NULL; } static int avc_latest_notif_update(int seqno, int is_insert) { int ret = 0; static DEFINE_SPINLOCK(notif_lock); unsigned long flag; spin_lock_irqsave(&notif_lock, flag); if (is_insert) { if (seqno < avc_cache.latest_notif) { printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n", seqno, avc_cache.latest_notif); ret = -EAGAIN; } } else { if (seqno > avc_cache.latest_notif) avc_cache.latest_notif = seqno; } spin_unlock_irqrestore(&notif_lock, flag); return ret; } /** * avc_insert - Insert an AVC entry. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @avd: resulting av decision * * Insert an AVC entry for the SID pair * (@ssid, @tsid) and class @tclass. * The access vectors and the sequence number are * normally provided by the security server in * response to a security_compute_av() call. If the * sequence number @avd->seqno is not less than the latest * revocation notification, then the function copies * the access vectors into a cache entry, returns * avc_node inserted. Otherwise, this function returns NULL. 
*/ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { struct avc_node *pos, *node = NULL; int hvalue; unsigned long flag; if (avc_latest_notif_update(avd->seqno, 1)) goto out; node = avc_alloc_node(); if (node) { struct hlist_head *head; struct hlist_node *next; spinlock_t *lock; hvalue = avc_hash(ssid, tsid, tclass); avc_node_populate(node, ssid, tsid, tclass, avd); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); hlist_for_each_entry(pos, next, head, list) { if (pos->ae.ssid == ssid && pos->ae.tsid == tsid && pos->ae.tclass == tclass) { avc_node_replace(node, pos); goto found; } } hlist_add_head_rcu(&node->list, head); found: spin_unlock_irqrestore(lock, flag); } out: return node; } /** * avc_audit_pre_callback - SELinux specific information * will be called by generic audit code * @ab: the audit buffer * @a: audit_data */ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, "avc: %s ", ad->selinux_audit_data->slad->denied ? 
"denied" : "granted"); avc_dump_av(ab, ad->selinux_audit_data->slad->tclass, ad->selinux_audit_data->slad->audited); audit_log_format(ab, " for "); } /** * avc_audit_post_callback - SELinux specific information * will be called by generic audit code * @ab: the audit buffer * @a: audit_data */ static void avc_audit_post_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, " "); avc_dump_query(ab, ad->selinux_audit_data->slad->ssid, ad->selinux_audit_data->slad->tsid, ad->selinux_audit_data->slad->tclass); } /* This is the slow part of avc audit with big stack footprint */ static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, u32 audited, u32 denied, struct common_audit_data *a, unsigned flags) { struct common_audit_data stack_data; struct selinux_audit_data sad = {0,}; struct selinux_late_audit_data slad; if (!a) { a = &stack_data; COMMON_AUDIT_DATA_INIT(a, NONE); a->selinux_audit_data = &sad; } /* * When in a RCU walk do the audit on the RCU retry. This is because * the collection of the dname in an inode audit message is not RCU * safe. Note this may drop some audits when the situation changes * during retry. However this is logically just as if the operation * happened a little later. */ if ((a->type == LSM_AUDIT_DATA_INODE) && (flags & MAY_NOT_BLOCK)) return -ECHILD; slad.tclass = tclass; slad.requested = requested; slad.ssid = ssid; slad.tsid = tsid; slad.audited = audited; slad.denied = denied; a->selinux_audit_data->slad = &slad; common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); return 0; } /** * avc_audit - Audit the granting or denial of permissions. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions * @avd: access vector decisions * @result: result from avc_has_perm_noaudit * @a: auxiliary audit data * @flags: VFS walk flags * * Audit the granting or denial of permissions in accordance * with the policy. This function is typically called by * avc_has_perm() after a permission check, but can also be * called directly by callers who use avc_has_perm_noaudit() * in order to separate the permission check from the auditing. * For example, this separation is useful when the permission check must * be performed under a lock, to allow the lock to be released * before calling the auditing code. */ inline int avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct av_decision *avd, int result, struct common_audit_data *a, unsigned flags) { u32 denied, audited; denied = requested & ~avd->allowed; if (unlikely(denied)) { audited = denied & avd->auditdeny; /* * a->selinux_audit_data->auditdeny is TRICKY! Setting a bit in * this field means that ANY denials should NOT be audited if * the policy contains an explicit dontaudit rule for that * permission. Take notice that this is unrelated to the * actual permissions that were denied. As an example lets * assume: * * denied == READ * avd.auditdeny & ACCESS == 0 (not set means explicit rule) * selinux_audit_data->auditdeny & ACCESS == 1 * * We will NOT audit the denial even though the denied * permission was READ and the auditdeny checks were for * ACCESS */ if (a && a->selinux_audit_data->auditdeny && !(a->selinux_audit_data->auditdeny & avd->auditdeny)) audited = 0; } else if (result) audited = denied = requested; else audited = requested & avd->auditallow; if (likely(!audited)) return 0; return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied, a, flags); } /** * avc_add_callback - Register a callback for security events. 
* @callback: callback function * @events: security events * @ssid: source security identifier or %SECSID_WILD * @tsid: target security identifier or %SECSID_WILD * @tclass: target security class * @perms: permissions * * Register a callback function for events in the set @events * related to the SID pair (@ssid, @tsid) * and the permissions @perms, interpreting * @perms based on @tclass. Returns %0 on success or * -%ENOMEM if insufficient memory exists to add the callback. */ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid, u16 tclass, u32 perms, u32 *out_retained), u32 events, u32 ssid, u32 tsid, u16 tclass, u32 perms) { struct avc_callback_node *c; int rc = 0; c = kmalloc(sizeof(*c), GFP_ATOMIC); if (!c) { rc = -ENOMEM; goto out; } c->callback = callback; c->events = events; c->ssid = ssid; c->tsid = tsid; c->perms = perms; c->next = avc_callbacks; avc_callbacks = c; out: return rc; } static inline int avc_sidcmp(u32 x, u32 y) { return (x == y || x == SECSID_WILD || y == SECSID_WILD); } /** * avc_update_node Update an AVC entry * @event : Updating event * @perms : Permission mask bits * @ssid,@tsid,@tclass : identifier of an AVC entry * @seqno : sequence number when decision was made * * if a valid AVC entry doesn't exist,this function returns -ENOENT. * if kmalloc() called internal returns NULL, this function returns -ENOMEM. * otherwise, this function updates the AVC entry. The original AVC-entry object * will release later by RCU. 
*/ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, u32 seqno) { int hvalue, rc = 0; unsigned long flag; struct avc_node *pos, *node, *orig = NULL; struct hlist_head *head; struct hlist_node *next; spinlock_t *lock; node = avc_alloc_node(); if (!node) { rc = -ENOMEM; goto out; } /* Lock the target slot */ hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); hlist_for_each_entry(pos, next, head, list) { if (ssid == pos->ae.ssid && tsid == pos->ae.tsid && tclass == pos->ae.tclass && seqno == pos->ae.avd.seqno){ orig = pos; break; } } if (!orig) { rc = -ENOENT; avc_node_kill(node); goto out_unlock; } /* * Copy and replace original node. */ avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd); switch (event) { case AVC_CALLBACK_GRANT: node->ae.avd.allowed |= perms; break; case AVC_CALLBACK_TRY_REVOKE: case AVC_CALLBACK_REVOKE: node->ae.avd.allowed &= ~perms; break; case AVC_CALLBACK_AUDITALLOW_ENABLE: node->ae.avd.auditallow |= perms; break; case AVC_CALLBACK_AUDITALLOW_DISABLE: node->ae.avd.auditallow &= ~perms; break; case AVC_CALLBACK_AUDITDENY_ENABLE: node->ae.avd.auditdeny |= perms; break; case AVC_CALLBACK_AUDITDENY_DISABLE: node->ae.avd.auditdeny &= ~perms; break; } avc_node_replace(node, orig); out_unlock: spin_unlock_irqrestore(lock, flag); out: return rc; } /** * avc_flush - Flush the cache */ static void avc_flush(void) { struct hlist_head *head; struct hlist_node *next; struct avc_node *node; spinlock_t *lock; unsigned long flag; int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; lock = &avc_cache.slots_lock[i]; spin_lock_irqsave(lock, flag); /* * With preemptable RCU, the outer spinlock does not * prevent RCU grace periods from ending. 
*/ rcu_read_lock(); hlist_for_each_entry(node, next, head, list) avc_node_delete(node); rcu_read_unlock(); spin_unlock_irqrestore(lock, flag); } } /** * avc_ss_reset - Flush the cache and revalidate migrated permissions. * @seqno: policy sequence number */ int avc_ss_reset(u32 seqno) { struct avc_callback_node *c; int rc = 0, tmprc; avc_flush(); for (c = avc_callbacks; c; c = c->next) { if (c->events & AVC_CALLBACK_RESET) { tmprc = c->callback(AVC_CALLBACK_RESET, 0, 0, 0, 0, NULL); /* save the first error encountered for the return value and continue processing the callbacks */ if (!rc) rc = tmprc; } } avc_latest_notif_update(seqno, 0); return rc; } /* * Slow-path helper function for avc_has_perm_noaudit, * when the avc_node lookup fails. We get called with * the RCU read lock held, and need to return with it * still held, but drop if for the security compute. * * Don't inline this, since it's the slow-path and just * results in a bigger stack frame. */ static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { rcu_read_unlock(); security_compute_av(ssid, tsid, tclass, avd); rcu_read_lock(); return avc_insert(ssid, tsid, tclass, avd); } static noinline int avc_denied(u32 ssid, u32 tsid, u16 tclass, u32 requested, unsigned flags, struct av_decision *avd) { if (flags & AVC_STRICT) return -EACCES; if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE)) return -EACCES; avc_update_node(AVC_CALLBACK_GRANT, requested, ssid, tsid, tclass, avd->seqno); return 0; } /** * avc_has_perm_noaudit - Check permissions but perform no auditing. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @flags: AVC_STRICT or 0 * @avd: access vector decisions * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions * based on @tclass, and call the security server on a cache miss to obtain * a new decision and add it to the cache. Return a copy of the decisions * in @avd. Return %0 if all @requested permissions are granted, * -%EACCES if any permissions are denied, or another -errno upon * other errors. This function is typically called by avc_has_perm(), * but may also be called directly to separate permission checking from * auditing, e.g. in cases where a lock must be held for the check but * should be released for the auditing. */ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, unsigned flags, struct av_decision *avd) { struct avc_node *node; int rc = 0; u32 denied; BUG_ON(!requested); rcu_read_lock(); node = avc_lookup(ssid, tsid, tclass); if (unlikely(!node)) { node = avc_compute_av(ssid, tsid, tclass, avd); } else { memcpy(avd, &node->ae.avd, sizeof(*avd)); avd = &node->ae.avd; } denied = requested & ~(avd->allowed); if (unlikely(denied)) rc = avc_denied(ssid, tsid, tclass, requested, flags, avd); rcu_read_unlock(); return rc; } /** * avc_has_perm - Check permissions and perform any appropriate auditing. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @auditdata: auxiliary audit data * @flags: VFS walk flags * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions * based on @tclass, and call the security server on a cache miss to obtain * a new decision and add it to the cache. Audit the granting or denial of * permissions in accordance with the policy. Return %0 if all @requested * permissions are granted, -%EACCES if any permissions are denied, or * another -errno upon other errors. */ int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata, unsigned flags) { struct av_decision avd; int rc, rc2; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, flags); if (rc2) return rc2; return rc; } u32 avc_policy_seqno(void) { return avc_cache.latest_notif; } void avc_disable(void) { /* * If you are looking at this because you have realized that we are * not destroying the avc_node_cachep it might be easy to fix, but * I don't know the memory barrier semantics well enough to know. It's * possible that some other task dereferenced security_ops when * it still pointed to selinux operations. If that is the case it's * possible that it is about to use the avc and is about to need the * avc_node_cachep. I know I could wrap the security.c security_ops call * in an rcu_lock, but seriously, it's not worth it. Instead I just flush * the cache and get that memory back. */ if (avc_node_cachep) { avc_flush(); /* kmem_cache_destroy(avc_node_cachep); */ } }
gpl-2.0
voidz777/android_kernel_samsung_tuna
arch/arm/mach-omap2/board-zoom-display.c
2862
3632
/* * Copyright (C) 2010 Texas Instruments Inc. * * Modified from mach-omap2/board-zoom-peripherals.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/i2c/twl.h> #include <linux/spi/spi.h> #include <plat/mcspi.h> #include <video/omapdss.h> #define LCD_PANEL_RESET_GPIO_PROD 96 #define LCD_PANEL_RESET_GPIO_PILOT 55 #define LCD_PANEL_QVGA_GPIO 56 static struct gpio zoom_lcd_gpios[] __initdata = { { -EINVAL, GPIOF_OUT_INIT_HIGH, "lcd reset" }, { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" }, }; static void __init zoom_lcd_panel_init(void) { zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? LCD_PANEL_RESET_GPIO_PROD : LCD_PANEL_RESET_GPIO_PILOT; if (gpio_request_array(zoom_lcd_gpios, ARRAY_SIZE(zoom_lcd_gpios))) pr_err("%s: Failed to get LCD GPIOs.\n", __func__); } static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev) { return 0; } static void zoom_panel_disable_lcd(struct omap_dss_device *dssdev) { } /* * PWMA/B register offsets (TWL4030_MODULE_PWMA) */ #define TWL_INTBR_PMBR1 0xD #define TWL_INTBR_GPBR1 0xC #define TWL_LED_PWMON 0x0 #define TWL_LED_PWMOFF 0x1 static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level) { unsigned char c; u8 mux_pwm, enb_pwm; if (level > 100) return -1; twl_i2c_read_u8(TWL4030_MODULE_INTBR, &mux_pwm, TWL_INTBR_PMBR1); twl_i2c_read_u8(TWL4030_MODULE_INTBR, &enb_pwm, TWL_INTBR_GPBR1); if (level == 0) { /* disable pwm1 output and clock */ enb_pwm = enb_pwm & 0xF5; /* change pwm1 pin to gpio pin */ mux_pwm = mux_pwm & 0xCF; twl_i2c_write_u8(TWL4030_MODULE_INTBR, enb_pwm, TWL_INTBR_GPBR1); twl_i2c_write_u8(TWL4030_MODULE_INTBR, mux_pwm, TWL_INTBR_PMBR1); return 0; } if (!((enb_pwm & 0xA) && (mux_pwm & 0x30))) { /* change gpio pin to 
pwm1 pin */ mux_pwm = mux_pwm | 0x30; /* enable pwm1 output and clock*/ enb_pwm = enb_pwm | 0x0A; twl_i2c_write_u8(TWL4030_MODULE_INTBR, mux_pwm, TWL_INTBR_PMBR1); twl_i2c_write_u8(TWL4030_MODULE_INTBR, enb_pwm, TWL_INTBR_GPBR1); } c = ((50 * (100 - level)) / 100) + 1; twl_i2c_write_u8(TWL4030_MODULE_PWM1, 0x7F, TWL_LED_PWMOFF); twl_i2c_write_u8(TWL4030_MODULE_PWM1, c, TWL_LED_PWMON); return 0; } static struct omap_dss_device zoom_lcd_device = { .name = "lcd", .driver_name = "NEC_8048_panel", .type = OMAP_DISPLAY_TYPE_DPI, .phy.dpi.data_lines = 24, .platform_enable = zoom_panel_enable_lcd, .platform_disable = zoom_panel_disable_lcd, .max_backlight_level = 100, .set_backlight = zoom_set_bl_intensity, }; static struct omap_dss_device *zoom_dss_devices[] = { &zoom_lcd_device, }; static struct omap_dss_board_info zoom_dss_data = { .num_devices = ARRAY_SIZE(zoom_dss_devices), .devices = zoom_dss_devices, .default_device = &zoom_lcd_device, }; static struct omap2_mcspi_device_config dss_lcd_mcspi_config = { .turbo_mode = 1, .single_channel = 1, /* 0: slave, 1: master */ }; static struct spi_board_info nec_8048_spi_board_info[] __initdata = { [0] = { .modalias = "nec_8048_spi", .bus_num = 1, .chip_select = 2, .max_speed_hz = 375000, .controller_data = &dss_lcd_mcspi_config, }, }; void __init zoom_display_init(void) { omap_display_init(&zoom_dss_data); spi_register_board_info(nec_8048_spi_board_info, ARRAY_SIZE(nec_8048_spi_board_info)); zoom_lcd_panel_init(); }
gpl-2.0
ISTweak/android_kernel_sharp_is15sh
drivers/acpi/acpica/evgpe.c
3118
22317
/****************************************************************************** * * Module Name: evgpe - General Purpose Event handling and dispatch * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evgpe") /* Local prototypes */ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); /******************************************************************************* * * FUNCTION: acpi_ev_update_gpe_enable_mask * * PARAMETERS: gpe_event_info - GPE to update * * RETURN: Status * * DESCRIPTION: Updates GPE register enable mask based upon whether there are * runtime references to this GPE * ******************************************************************************/ acpi_status acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) { struct acpi_gpe_register_info *gpe_register_info; u32 register_bit; ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask); gpe_register_info = gpe_event_info->register_info; if (!gpe_register_info) { return_ACPI_STATUS(AE_NOT_EXIST); } register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); /* Clear the run bit up front */ ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); /* Set the mask bit only if there are references to this GPE */ if (gpe_event_info->runtime_count) { ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: 
acpi_ev_enable_gpe * * PARAMETERS: gpe_event_info - GPE to enable * * RETURN: Status * * DESCRIPTION: Clear a GPE of stale events and enable it. * ******************************************************************************/ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; ACPI_FUNCTION_TRACE(ev_enable_gpe); /* * We will only allow a GPE to be enabled if it has either an associated * method (_Lxx/_Exx) or a handler, or is using the implicit notify * feature. Otherwise, the GPE will be immediately disabled by * acpi_ev_gpe_dispatch the first time it fires. */ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_NONE) { return_ACPI_STATUS(AE_NO_HANDLER); } /* Clear the GPE (of stale events) */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Enable the requested GPE */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_add_gpe_reference * * PARAMETERS: gpe_event_info - Add a reference to this GPE * * RETURN: Status * * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is * hardware-enabled. 
* ******************************************************************************/ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_add_gpe_reference); if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count++; if (gpe_event_info->runtime_count == 1) { /* Enable on first reference */ status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_ev_enable_gpe(gpe_event_info); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count--; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_remove_gpe_reference * * PARAMETERS: gpe_event_info - Remove a reference to this GPE * * RETURN: Status * * DESCRIPTION: Remove a reference to a GPE. When the last reference is * removed, the GPE is hardware-disabled. * ******************************************************************************/ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); if (!gpe_event_info->runtime_count) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count--; if (!gpe_event_info->runtime_count) { /* Disable on last reference */ status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count++; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_low_get_gpe_info * * PARAMETERS: gpe_number - Raw GPE number * gpe_block - A GPE info block * * RETURN: A GPE event_info struct. 
NULL if not a valid GPE (The gpe_number * is not within the specified GPE block) * * DESCRIPTION: Returns the event_info struct associated with this GPE. This is * the low-level implementation of ev_get_gpe_event_info. * ******************************************************************************/ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, struct acpi_gpe_block_info *gpe_block) { u32 gpe_index; /* * Validate that the gpe_number is within the specified gpe_block. * (Two steps) */ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) { return (NULL); } gpe_index = gpe_number - gpe_block->block_base_number; if (gpe_index >= gpe_block->gpe_count) { return (NULL); } return (&gpe_block->event_info[gpe_index]); } /******************************************************************************* * * FUNCTION: acpi_ev_get_gpe_event_info * * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 * gpe_number - Raw GPE number * * RETURN: A GPE event_info struct. NULL if not a valid GPE * * DESCRIPTION: Returns the event_info struct associated with this GPE. * Validates the gpe_block and the gpe_number * * Should be called only when the GPE lists are semaphore locked * and not subject to change. 
* ******************************************************************************/ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number) { union acpi_operand_object *obj_desc; struct acpi_gpe_event_info *gpe_info; u32 i; ACPI_FUNCTION_ENTRY(); /* A NULL gpe_device means use the FADT-defined GPE block(s) */ if (!gpe_device) { /* Examine GPE Block 0 and 1 (These blocks are permanent) */ for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { gpe_info = acpi_ev_low_get_gpe_info(gpe_number, acpi_gbl_gpe_fadt_blocks [i]); if (gpe_info) { return (gpe_info); } } /* The gpe_number was not in the range of either FADT GPE block */ return (NULL); } /* A Non-NULL gpe_device means this is a GPE Block Device */ obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) gpe_device); if (!obj_desc || !obj_desc->device.gpe_block) { return (NULL); } return (acpi_ev_low_get_gpe_info (gpe_number, obj_desc->device.gpe_block)); } /******************************************************************************* * * FUNCTION: acpi_ev_gpe_detect * * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. * Can have multiple GPE blocks attached. * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Detect if any GP events have occurred. This function is * executed at interrupt level. 
* ******************************************************************************/ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) { acpi_status status; struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_register_info *gpe_register_info; u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u8 enabled_status_byte; u32 status_reg; u32 enable_reg; acpi_cpu_flags flags; u32 i; u32 j; ACPI_FUNCTION_NAME(ev_gpe_detect); /* Check for the case where there are no GPEs */ if (!gpe_xrupt_list) { return (int_status); } /* * We need to obtain the GPE lock for both the data structs and registers * Note: Not necessary to obtain the hardware lock, since the GPE * registers are owned by the gpe_lock. */ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Examine all GPE blocks attached to this interrupt level */ gpe_block = gpe_xrupt_list->gpe_block_list_head; while (gpe_block) { /* * Read all of the 8-bit GPE status and enable registers in this GPE * block, saving all of them. Find all currently active GP events. */ for (i = 0; i < gpe_block->register_count; i++) { /* Get the next status/enable pair */ gpe_register_info = &gpe_block->register_info[i]; /* * Optimization: If there are no GPEs enabled within this * register, we can safely ignore the entire register. 
*/ if (!(gpe_register_info->enable_for_run | gpe_register_info->enable_for_wake)) { continue; } /* Read the Status Register */ status = acpi_hw_read(&status_reg, &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* Read the Enable Register */ status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", gpe_register_info->base_gpe_number, status_reg, enable_reg)); /* Check if there is anything active at all in this register */ enabled_status_byte = (u8) (status_reg & enable_reg); if (!enabled_status_byte) { /* No active GPEs in this register, move on */ continue; } /* Now look at the individual GPEs in this byte register */ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { /* Examine one GPE bit */ if (enabled_status_byte & (1 << j)) { /* * Found an active GPE. Dispatch the event to a handler * or method. */ int_status |= acpi_ev_gpe_dispatch(gpe_block-> node, &gpe_block-> event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); } } } gpe_block = gpe_block->next; } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return (int_status); } /******************************************************************************* * * FUNCTION: acpi_ev_asynch_execute_gpe_method * * PARAMETERS: Context (gpe_event_info) - Info for this GPE * * RETURN: None * * DESCRIPTION: Perform the actual execution of a GPE control method. This * function is called from an invocation of acpi_os_execute and * therefore does NOT execute at interrupt level - so that * the control method itself is not executed in the context of * an interrupt handler. 
 *
 ******************************************************************************/

/*
 * Deferred (non-interrupt-level) worker for a dispatched GPE: queued via
 * acpi_os_execute by acpi_ev_gpe_dispatch. Runs the GPE's _Lxx/_Exx control
 * method or queues an implicit DEVICE_WAKE notify, then queues
 * acpi_ev_asynch_enable_gpe to clear/re-enable the GPE.
 */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	struct acpi_gpe_event_info *local_gpe_event_info;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_object *notify_object;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Allocate a local GPE block */

	local_gpe_event_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
	if (!local_gpe_event_info) {
		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
		return_VOID;
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		/* NOTE(review): local_gpe_event_info leaks on this path */
		return_VOID;
	}

	/* Do the correct dispatch - normal method or implicit notify */

	switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 */
		status = acpi_ev_queue_notify_request(
				local_gpe_event_info->dispatch.device.node,
				ACPI_NOTIFY_DEVICE_WAKE);

		/* Also notify every additional device chained to this GPE */

		notify_object = local_gpe_event_info->dispatch.device.next;
		while (ACPI_SUCCESS(status) && notify_object) {
			status = acpi_ev_queue_notify_request(
					notify_object->node,
					ACPI_NOTIFY_DEVICE_WAKE);
			notify_object = notify_object->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info->dispatch.
					 method_node)));
		}

		break;

	default:
		/* NOTE(review): local_gpe_event_info leaks on this path */
		return_VOID;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe,
				 local_gpe_event_info);
	if (ACPI_FAILURE(status)) {
		/* Not queued; callback will never run, so free the snapshot here */
		ACPI_FREE(local_gpe_event_info);
	}
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE.
This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;

	/*
	 * Context is the snapshot allocated by
	 * acpi_ev_asynch_execute_gpe_method; finish (clear/re-enable) the
	 * GPE, then release the snapshot.
	 */
	(void)acpi_ev_finish_gpe(gpe_event_info);

	ACPI_FREE(gpe_event_info);
	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_for_run bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE%02X", gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE%02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 * The deferred worker (acpi_ev_asynch_execute_gpe_method) also
		 * re-enables the GPE when done.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE%2X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE%02X, disabling event",
			    gpe_number));
		break;
	}

	/* The GPE has been disabled above; the interrupt is considered handled */

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
gpl-2.0
pec0ra/abricot
drivers/video/msm/lcdc_nt35582_wvga.c
3630
13582
/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* LCDC panel driver for the Novatek NT35582 WVGA panel, programmed over SPI */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/module.h>
#ifdef CONFIG_SPI_QUP
#include <linux/spi/spi.h>
#endif
#include <mach/gpio.h>
#include <mach/pmic.h>
#include "msm_fb.h"

#define LCDC_NT35582_PANEL_NAME "lcdc_nt35582_wvga"

/* Control bytes interleaved with register address/data in each SPI transfer */
#define WRITE_FIRST_TRANS	0x20
#define WRITE_SECOND_TRANS	0x00
#define WRITE_THIRD_TRANS	0x40
#define READ_FIRST_TRANS	0x20
#define READ_SECOND_TRANS	0x00
#define READ_THIRD_TRANS	0xC0

#ifdef CONFIG_SPI_QUP
#define LCDC_NT35582_SPI_DEVICE_NAME	"lcdc_nt35582_spi"
static struct spi_device *spi_client;	/* set in lcdc_nt35582_spi_probe */
#endif

struct nt35582_state_type {
	boolean display_on;	/* panel init sequence already sent */
	int bl_level;		/* last backlight level applied (0..31) */
};

static struct nt35582_state_type nt35582_state = { 0 };
static int gpio_backlight_en;
static struct msm_panel_common_pdata *lcdc_nt35582_pdata;

/*
 * Write a 2-byte command (16-bit register address, no data) to the panel.
 * Each byte is paired with a control byte, so 4 bytes go on the wire as
 * two 16-bit words. Returns 0 on success or the spi_sync() error code.
 */
static int spi_write_2bytes(struct spi_device *spi,
	unsigned char reg_high_addr, unsigned char reg_low_addr)
{
	char tx_buf[4];
	int rc;
	struct spi_message m;
	struct spi_transfer t;

	memset(&t, 0, sizeof t);
	t.tx_buf = tx_buf;
	spi_setup(spi);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	tx_buf[0] = WRITE_FIRST_TRANS;
	tx_buf[1] = reg_high_addr;
	tx_buf[2] = WRITE_SECOND_TRANS;
	tx_buf[3] = reg_low_addr;
	t.rx_buf = NULL;
	t.len = 4;
	t.bits_per_word = 16;
	rc = spi_sync(spi, &m);
	if (rc)
		pr_err("write spi command failed!\n");
	return rc;
}

/*
 * Write one data byte to a 16-bit panel register. Three control/payload
 * byte pairs (6 bytes, three 16-bit words) per transfer.
 * Returns 0 on success or the spi_sync() error code.
 */
static int spi_write_3bytes(struct spi_device *spi, unsigned char reg_high_addr,
	unsigned char reg_low_addr, unsigned char write_data)
{
	char tx_buf[6];
	int rc;
	struct spi_message m;
	struct spi_transfer t;

	memset(&t, 0, sizeof t);
	t.tx_buf = tx_buf;
	spi_setup(spi);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	tx_buf[0] = WRITE_FIRST_TRANS;
	tx_buf[1] = reg_high_addr;
	tx_buf[2] = WRITE_SECOND_TRANS;
	tx_buf[3] = reg_low_addr;
	tx_buf[4] = WRITE_THIRD_TRANS;
	tx_buf[5] = write_data;
	t.rx_buf = NULL;
	t.len = 6;
	t.bits_per_word = 16;
	rc = spi_sync(spi, &m);
	if (rc)
		pr_err("write spi command failed!\n");
	return rc;
}

/*
 * Read one byte from a 16-bit panel register into *read_value.
 * *read_value is only written on success.
 * NOTE(review): the error message says "write" on the read path -- the
 * string is shared with the write helpers; consider distinguishing it.
 */
static int spi_read_bytes(struct spi_device *spi, unsigned char reg_high_addr,
	unsigned char reg_low_addr, unsigned char *read_value)
{
	char tx_buf[6];
	char rx_buf[6];
	int rc;
	struct spi_message m;
	struct spi_transfer t;

	memset(&t, 0, sizeof t);
	t.tx_buf = tx_buf;
	spi_setup(spi);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	tx_buf[0] = READ_FIRST_TRANS;
	tx_buf[1] = reg_high_addr;
	tx_buf[2] = READ_SECOND_TRANS;
	tx_buf[3] = reg_low_addr;
	tx_buf[4] = READ_THIRD_TRANS;
	tx_buf[5] = 0x00;	/* dummy byte clocked out while data is read */
	t.rx_buf = rx_buf;
	t.len = 6;
	t.bits_per_word = 16;
	rc = spi_sync(spi, &m);
	if (rc)
		pr_err("write spi command failed!\n");
	else
		*read_value = rx_buf[5];
	return rc;
}

/*
 * Send the full panel initialization/power-on register sequence, then read
 * back the panel ID. The sequence is only sent once per display-on cycle
 * (guarded by nt35582_state.display_on).
 */
static void nt35582_disp_on(void)
{
	uint32 panel_id1 = 0, panel_id2 = 0;

	if (!nt35582_state.display_on) {
		/* GVDD setting */
		spi_write_3bytes(spi_client, 0xC0, 0x00, 0xC0);
		spi_write_3bytes(spi_client, 0xC0, 0x01, 0x00);
		spi_write_3bytes(spi_client, 0xC0, 0x02, 0xC0);
		spi_write_3bytes(spi_client, 0xC0, 0x03, 0x00);

		/* Power setting */
		spi_write_3bytes(spi_client, 0xC1, 0x00, 0x40);
		spi_write_3bytes(spi_client, 0xC2, 0x00, 0x21);
		spi_write_3bytes(spi_client, 0xC2, 0x02, 0x02);

		/* Gamma setting (register tables 0xE0..0xE5, vendor-provided) */
		spi_write_3bytes(spi_client, 0xE0, 0x00, 0x0E);
		spi_write_3bytes(spi_client, 0xE0, 0x01, 0x54);
		spi_write_3bytes(spi_client, 0xE0, 0x02, 0x63);
		spi_write_3bytes(spi_client, 0xE0, 0x03, 0x76);
		spi_write_3bytes(spi_client, 0xE0, 0x04, 0x1F);
		spi_write_3bytes(spi_client, 0xE0, 0x05, 0x31);
		spi_write_3bytes(spi_client, 0xE0, 0x06, 0x62);
		spi_write_3bytes(spi_client,
0xE0, 0x07, 0x78);
		spi_write_3bytes(spi_client, 0xE0, 0x08, 0x1F);
		spi_write_3bytes(spi_client, 0xE0, 0x09, 0x25);
		spi_write_3bytes(spi_client, 0xE0, 0x0A, 0xB3);
		spi_write_3bytes(spi_client, 0xE0, 0x0B, 0x17);
		spi_write_3bytes(spi_client, 0xE0, 0x0C, 0x38);
		spi_write_3bytes(spi_client, 0xE0, 0x0D, 0x5A);
		spi_write_3bytes(spi_client, 0xE0, 0x0E, 0xA2);
		spi_write_3bytes(spi_client, 0xE0, 0x0F, 0xA2);
		spi_write_3bytes(spi_client, 0xE0, 0x10, 0x24);
		spi_write_3bytes(spi_client, 0xE0, 0x11, 0x57);

		spi_write_3bytes(spi_client, 0xE1, 0x00, 0x0E);
		spi_write_3bytes(spi_client, 0xE1, 0x01, 0x54);
		spi_write_3bytes(spi_client, 0xE1, 0x02, 0x63);
		spi_write_3bytes(spi_client, 0xE1, 0x03, 0x76);
		spi_write_3bytes(spi_client, 0xE1, 0x04, 0x1F);
		spi_write_3bytes(spi_client, 0xE1, 0x05, 0x31);
		spi_write_3bytes(spi_client, 0xE1, 0x06, 0X62);
		spi_write_3bytes(spi_client, 0xE1, 0x07, 0x78);
		spi_write_3bytes(spi_client, 0xE1, 0x08, 0x1F);
		spi_write_3bytes(spi_client, 0xE1, 0x09, 0x25);
		spi_write_3bytes(spi_client, 0xE1, 0x0A, 0xB3);
		spi_write_3bytes(spi_client, 0xE1, 0x0B, 0x17);
		spi_write_3bytes(spi_client, 0xE1, 0x0C, 0x38);
		spi_write_3bytes(spi_client, 0xE1, 0x0D, 0x5A);
		spi_write_3bytes(spi_client, 0xE1, 0x0E, 0xA2);
		spi_write_3bytes(spi_client, 0xE1, 0x0F, 0xA2);
		spi_write_3bytes(spi_client, 0xE1, 0x10, 0x24);
		spi_write_3bytes(spi_client, 0xE1, 0x11, 0x57);

		spi_write_3bytes(spi_client, 0xE2, 0x00, 0x0E);
		spi_write_3bytes(spi_client, 0xE2, 0x01, 0x54);
		spi_write_3bytes(spi_client, 0xE2, 0x02, 0x63);
		spi_write_3bytes(spi_client, 0xE2, 0x03, 0x76);
		spi_write_3bytes(spi_client, 0xE2, 0x04, 0x1F);
		spi_write_3bytes(spi_client, 0xE2, 0x05, 0x31);
		spi_write_3bytes(spi_client, 0xE2, 0x06, 0x62);
		spi_write_3bytes(spi_client, 0xE2, 0x07, 0x78);
		spi_write_3bytes(spi_client, 0xE2, 0x08, 0x1F);
		spi_write_3bytes(spi_client, 0xE2, 0x09, 0x25);
		spi_write_3bytes(spi_client, 0xE2, 0x0A, 0xB3);
		spi_write_3bytes(spi_client, 0xE2, 0x0B, 0x17);
		spi_write_3bytes(spi_client, 0xE2, 0x0C, 0x38);
		spi_write_3bytes(spi_client, 0xE2, 0x0D, 0x5A);
		spi_write_3bytes(spi_client, 0xE2, 0x0E, 0xA2);
		spi_write_3bytes(spi_client, 0xE2, 0x0F, 0xA2);
		spi_write_3bytes(spi_client, 0xE2, 0x10, 0x24);
		spi_write_3bytes(spi_client, 0xE2, 0x11, 0x57);

		spi_write_3bytes(spi_client, 0xE3, 0x00, 0x0E);
		spi_write_3bytes(spi_client, 0xE3, 0x01, 0x54);
		spi_write_3bytes(spi_client, 0xE3, 0x02, 0x63);
		spi_write_3bytes(spi_client, 0xE3, 0x03, 0x76);
		spi_write_3bytes(spi_client, 0xE3, 0x04, 0x1F);
		spi_write_3bytes(spi_client, 0xE3, 0x05, 0x31);
		spi_write_3bytes(spi_client, 0xE3, 0x06, 0x62);
		spi_write_3bytes(spi_client, 0xE3, 0x07, 0x78);
		spi_write_3bytes(spi_client, 0xE3, 0x08, 0x1F);
		spi_write_3bytes(spi_client, 0xE3, 0x09, 0x25);
		spi_write_3bytes(spi_client, 0xE3, 0x0A, 0xB3);
		spi_write_3bytes(spi_client, 0xE3, 0x0B, 0x17);
		spi_write_3bytes(spi_client, 0xE3, 0x0C, 0x38);
		spi_write_3bytes(spi_client, 0xE3, 0x0D, 0x5A);
		spi_write_3bytes(spi_client, 0xE3, 0x0E, 0xA2);
		spi_write_3bytes(spi_client, 0xE3, 0x0F, 0xA2);
		spi_write_3bytes(spi_client, 0xE3, 0x10, 0x24);
		spi_write_3bytes(spi_client, 0xE3, 0x11, 0x57);

		spi_write_3bytes(spi_client, 0xE4, 0x00, 0x48);
		spi_write_3bytes(spi_client, 0xE4, 0x01, 0x6B);
		spi_write_3bytes(spi_client, 0xE4, 0x02, 0x84);
		spi_write_3bytes(spi_client, 0xE4, 0x03, 0x9B);
		spi_write_3bytes(spi_client, 0xE4, 0x04, 0x1F);
		spi_write_3bytes(spi_client, 0xE4, 0x05, 0x31);
		spi_write_3bytes(spi_client, 0xE4, 0x06, 0x62);
		spi_write_3bytes(spi_client, 0xE4, 0x07, 0x78);
		spi_write_3bytes(spi_client, 0xE4, 0x08, 0x1F);
		spi_write_3bytes(spi_client, 0xE4, 0x09, 0x25);
		spi_write_3bytes(spi_client, 0xE4, 0x0A, 0xB3);
		spi_write_3bytes(spi_client, 0xE4, 0x0B, 0x17);
		spi_write_3bytes(spi_client, 0xE4, 0x0C, 0x38);
		spi_write_3bytes(spi_client, 0xE4, 0x0D, 0x5A);
		spi_write_3bytes(spi_client, 0xE4, 0x0E, 0xA2);
		spi_write_3bytes(spi_client, 0xE4, 0x0F, 0xA2);
		spi_write_3bytes(spi_client, 0xE4, 0x10, 0x24);
		spi_write_3bytes(spi_client, 0xE4, 0x11, 0x57);

		spi_write_3bytes(spi_client, 0xE5, 0x00, 0x48);
		spi_write_3bytes(spi_client, 0xE5, 0x01, 0x6B);
		spi_write_3bytes(spi_client, 0xE5, 0x02, 0x84);
		spi_write_3bytes(spi_client, 0xE5, 0x03, 0x9B);
		spi_write_3bytes(spi_client, 0xE5, 0x04, 0x1F);
		spi_write_3bytes(spi_client, 0xE5, 0x05, 0x31);
		spi_write_3bytes(spi_client, 0xE5, 0x06, 0x62);
		spi_write_3bytes(spi_client, 0xE5, 0x07, 0x78);
		spi_write_3bytes(spi_client, 0xE5, 0x08, 0x1F);
		spi_write_3bytes(spi_client, 0xE5, 0x09, 0x25);
		spi_write_3bytes(spi_client, 0xE5, 0x0A, 0xB3);
		spi_write_3bytes(spi_client, 0xE5, 0x0B, 0x17);
		spi_write_3bytes(spi_client, 0xE5, 0x0C, 0x38);
		spi_write_3bytes(spi_client, 0xE5, 0x0D, 0x5A);
		spi_write_3bytes(spi_client, 0xE5, 0x0E, 0xA2);
		spi_write_3bytes(spi_client, 0xE5, 0x0F, 0xA2);
		spi_write_3bytes(spi_client, 0xE5, 0x10, 0x24);
		spi_write_3bytes(spi_client, 0xE5, 0x11, 0x57);

		/* Data format setting */
		spi_write_3bytes(spi_client, 0x3A, 0x00, 0x70);
		/* Reverse PCLK signal of LCM to meet Qualcomm's platform */
		spi_write_3bytes(spi_client, 0x3B, 0x00, 0x2B);
		/* Scan direction setting */
		spi_write_3bytes(spi_client, 0x36, 0x00, 0x00);

		/* Sleep out; panel needs 120 ms before further commands */
		spi_write_2bytes(spi_client, 0x11, 0x00);
		msleep(120);
		/* Display on */
		spi_write_2bytes(spi_client, 0x29, 0x00);
		pr_info("%s: LCM SPI display on CMD finished...\n", __func__);
		msleep(200);

		nt35582_state.display_on = TRUE;
	}

	/* Test to read RDDID.
It should be 0x0055h and 0x0082h */ spi_read_bytes(spi_client, 0x10, 0x80, (unsigned char *)&panel_id1); spi_read_bytes(spi_client, 0x11, 0x80, (unsigned char *)&panel_id2); pr_info(KERN_INFO "nt35582_disp_on: LCM_ID=[0x%x, 0x%x]\n", panel_id1, panel_id2); } static int lcdc_nt35582_panel_on(struct platform_device *pdev) { nt35582_disp_on(); return 0; } static int lcdc_nt35582_panel_off(struct platform_device *pdev) { nt35582_state.display_on = FALSE; return 0; } static void lcdc_nt35582_set_backlight(struct msm_fb_data_type *mfd) { int bl_level; int i = 0, step = 0; bl_level = mfd->bl_level; if (bl_level == nt35582_state.bl_level) return; else nt35582_state.bl_level = bl_level; if (bl_level == 0) { gpio_set_value_cansleep(gpio_backlight_en, 0); return; } /* Level:0~31 mapping to step 32~1 */ step = 32 - bl_level; for (i = 0; i < step; i++) { gpio_set_value_cansleep(gpio_backlight_en, 0); ndelay(5); gpio_set_value_cansleep(gpio_backlight_en, 1); ndelay(5); } } static int __devinit nt35582_probe(struct platform_device *pdev) { if (pdev->id == 0) { lcdc_nt35582_pdata = pdev->dev.platform_data; return 0; } gpio_backlight_en = *(lcdc_nt35582_pdata->gpio_num); msm_fb_add_device(pdev); return 0; } #ifdef CONFIG_SPI_QUP static int __devinit lcdc_nt35582_spi_probe(struct spi_device *spi) { spi_client = spi; spi_client->bits_per_word = 16; spi_client->chip_select = 0; spi_client->max_speed_hz = 1100000; spi_client->mode = SPI_MODE_0; spi_setup(spi_client); return 0; } static int __devexit lcdc_nt35582_spi_remove(struct spi_device *spi) { spi_client = NULL; return 0; } static struct spi_driver lcdc_nt35582_spi_driver = { .driver = { .name = LCDC_NT35582_SPI_DEVICE_NAME, .owner = THIS_MODULE, }, .probe = lcdc_nt35582_spi_probe, .remove = __devexit_p(lcdc_nt35582_spi_remove), }; #endif static struct platform_driver this_driver = { .probe = nt35582_probe, .driver = { .name = LCDC_NT35582_PANEL_NAME, }, }; static struct msm_fb_panel_data nt35582_panel_data = { .on = 
lcdc_nt35582_panel_on, .off = lcdc_nt35582_panel_off, .set_backlight = lcdc_nt35582_set_backlight, }; static struct platform_device this_device = { .name = LCDC_NT35582_PANEL_NAME, .id = 1, .dev = { .platform_data = &nt35582_panel_data, } }; static int __init lcdc_nt35582_panel_init(void) { int ret; struct msm_panel_info *pinfo; #ifdef CONFIG_FB_MSM_LCDC_AUTO_DETECT if (msm_fb_detect_client(LCDC_NT35582_PANEL_NAME)) { pr_err("detect failed\n"); return 0; } #endif ret = platform_driver_register(&this_driver); if (ret) { pr_err("Fails to platform_driver_register...\n"); return ret; } pinfo = &nt35582_panel_data.panel_info; pinfo->xres = 480; pinfo->yres = 800; MSM_FB_SINGLE_MODE_PANEL(pinfo); pinfo->type = LCDC_PANEL; pinfo->pdest = DISPLAY_1; pinfo->wait_cycle = 0; pinfo->bpp = 24; pinfo->fb_num = 2; pinfo->clk_rate = 25600000; pinfo->bl_max = 31; pinfo->bl_min = 1; pinfo->lcdc.h_back_porch = 10; /* hsw = 8 + hbp=184 */ pinfo->lcdc.h_front_porch = 10; pinfo->lcdc.h_pulse_width = 2; pinfo->lcdc.v_back_porch = 4; /* vsw=1 + vbp = 2 */ pinfo->lcdc.v_front_porch = 10; pinfo->lcdc.v_pulse_width = 2; pinfo->lcdc.border_clr = 0; /* blk */ pinfo->lcdc.underflow_clr = 0xff; /* blue */ pinfo->lcdc.hsync_skew = 0; ret = platform_device_register(&this_device); if (ret) { pr_err("not able to register the device\n"); goto fail_driver; } #ifdef CONFIG_SPI_QUP ret = spi_register_driver(&lcdc_nt35582_spi_driver); if (ret) { pr_err("not able to register spi\n"); goto fail_device; } #endif return ret; #ifdef CONFIG_SPI_QUP fail_device: platform_device_unregister(&this_device); #endif fail_driver: platform_driver_unregister(&this_driver); return ret; } device_initcall(lcdc_nt35582_panel_init);
gpl-2.0
LeeDroid-/Flyer-2.6.35
mm/mmzone.c
3886
1808
/* * linux/mm/mmzone.c * * management codes for pgdats and zones. */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/module.h> struct pglist_data *first_online_pgdat(void) { return NODE_DATA(first_online_node); } struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) { int nid = next_online_node(pgdat->node_id); if (nid == MAX_NUMNODES) return NULL; return NODE_DATA(nid); } /* * next_zone - helper magic for for_each_zone() */ struct zone *next_zone(struct zone *zone) { pg_data_t *pgdat = zone->zone_pgdat; if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) zone++; else { pgdat = next_online_pgdat(pgdat); if (pgdat) zone = pgdat->node_zones; else zone = NULL; } return zone; } static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes) { #ifdef CONFIG_NUMA return node_isset(zonelist_node_idx(zref), *nodes); #else return 1; #endif /* CONFIG_NUMA */ } /* Returns the next zone at or below highest_zoneidx in a zonelist */ struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, nodemask_t *nodes, struct zone **zone) { /* * Find the next suitable zone to use for the allocation. * Only filter based on nodemask if it's set */ if (likely(nodes == NULL)) while (zonelist_zone_idx(z) > highest_zoneidx) z++; else while (zonelist_zone_idx(z) > highest_zoneidx || (z->zone && !zref_in_nodemask(z, nodes))) z++; *zone = zonelist_zone(z); return z; } #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL int memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone) { if (page_to_pfn(page) != pfn) return 0; if (page_zone(page) != zone) return 0; return 1; } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
gpl-2.0
hroark13/android_kernel_zte_draconis
arch/sh/kernel/signal_64.c
4398
21266
/*
 * arch/sh/kernel/signal_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003 - 2008  Paul Mundt
 * Copyright (C) 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>

/* SH-5 register-file indices used when poking pt_regs */
#define REG_RET 9
#define REG_ARG1 2
#define REG_ARG2 3
#define REG_ARG3 4
#define REG_SP 15
#define REG_PR 18

#define REF_REG_RET regs->regs[REG_RET]
#define REF_REG_SP regs->regs[REG_SP]
#define DEREF_REG_PR regs->regs[REG_PR]

#define DEBUG_SIG 0

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
		sigset_t *oldset, struct pt_regs * regs);

/*
 * Rewrite the syscall return value / PC so an interrupted syscall is
 * restarted (or fails with -EINTR) before a handler runs, honouring
 * SA_RESTART. PC is backed up 4 bytes so the trap instruction re-executes.
 */
static inline void
handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
{
	/* If we're not from a syscall, bail out */
	if (regs->syscall_nr < 0)
		return;

	/* check for system call restart.. */
	switch (regs->regs[REG_RET]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
		no_system_call_restart:
			regs->regs[REG_RET] = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(sa->sa_flags & SA_RESTART))
				goto no_system_call_restart;
		/* fallthrough */
		case -ERESTARTNOINTR:
			/* Decode syscall # */
			regs->regs[REG_RET] = regs->syscall_nr;
			regs->pc -= 4;
			break;
	}
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
/* Returns 1 if a signal frame was set up (or we came from kernel mode),
 * 0 if no signal was delivered. */
static int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, 0);
	if (signr > 0) {
		handle_syscall_restart(regs, &ka.sa);

		/* Whee!  Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * If a signal was successfully delivered, the
			 * saved sigmask is in its frame, and we can
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;

			tracehook_signal_handler(signr, &info, &ka, regs,
					test_thread_flag(TIF_SINGLESTEP));
			return 1;
		}
	}

	/* Did we come from a system call? */
	if (regs->syscall_nr >= 0) {
		/* Restart the system call - no handlers present */
		switch (regs->regs[REG_RET]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* Decode Syscall # */
			regs->regs[REG_RET] = regs->syscall_nr;
			regs->pc -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->regs[REG_RET] = __NR_restart_syscall;
			regs->pc -= 4;
			break;
		}
	}

	/* No signal to deliver -- put the saved sigmask back */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}

	return 0;
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
sys_sigsuspend(old_sigset_t mask,
	       unsigned long r3, unsigned long r4, unsigned long r5,
	       unsigned long r6, unsigned long r7,
	       struct pt_regs * regs)
{
	sigset_t saveset, blocked;

	saveset = current->blocked;

	mask &= _BLOCKABLE;	/* SIGKILL/SIGSTOP can never be blocked */
	siginitset(&blocked, mask);
	set_current_blocked(&blocked);

	REF_REG_RET = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		set_restore_sigmask();
		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
		if (do_signal(regs, &saveset)) {
			/* pc now points at signal handler. Need to decrement
			   it because entry.S will increment it. */
			regs->pc -= 4;
			return -EINTR;
		}
	}
}

asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
	          unsigned long r4, unsigned long r5, unsigned long r6,
	          unsigned long r7,
	          struct pt_regs * regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	saveset = current->blocked;
	set_current_blocked(&newset);

	REF_REG_RET = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
		if (do_signal(regs, &saveset)) {
			/* pc now points at signal handler. Need to decrement
			   it because entry.S will increment it. */
			regs->pc -= 4;
			return -EINTR;
		}
	}
}

/* Old-style sigaction: copy handler/mask/flags to and from userspace. */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}

asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
	        unsigned long r4, unsigned long r5, unsigned long r6,
	        unsigned long r7,
	        struct pt_regs * regs)
{
	return do_sigaltstack(uss, uoss, REF_REG_SP);
}

/*
 * Do a signal return; undo the signal stack.
 */
struct sigframe {
	struct sigcontext sc;
	unsigned long extramask[_NSIG_WORDS-1];
	long long retcode[2];
};

struct rt_sigframe {
	struct siginfo __user *pinfo;
	void *puc;
	struct siginfo info;
	struct ucontext uc;
	long long retcode[2];
};

#ifdef CONFIG_SH_FPU
/*
 * Restore FPU state from the sigcontext, if it was saved there. Forces a
 * lazy FPU reload by dropping last_task_used_math and setting SR.FD.
 */
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int fpvalid;

	err |= __get_user (fpvalid, &sc->sc_fpvalid);
	conditional_used_math(fpvalid);
	if (! fpvalid)
		return err;

	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}

	err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
				(sizeof(long long) * 32) + (sizeof(int) * 1));

	return err;
}

/*
 * Save the current FPU state into the sigcontext (flushing live registers
 * first if this task owns the FPU).
 */
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int fpvalid;

	fpvalid = !!used_math();
	err |= __put_user(fpvalid, &sc->sc_fpvalid);
	if (!
fpvalid)
		return err;

	if (current == last_task_used_math) {
		/* Flush live FPU registers to the thread struct before copying */
		enable_fpu();
		save_fpu(current);
		disable_fpu();
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}

	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
			      (sizeof(long long) * 32) + (sizeof(int) * 1));
	clear_used_math();

	return err;
}
#else
/* No FPU configured: sigcontext FPU save/restore are no-ops. */
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	return 0;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	return 0;
}
#endif

/*
 * Restore the full register file from a user sigcontext. The returned
 * r2 value (syscall return slot) is passed back through *r2_p so the
 * sigreturn syscall can propagate it untouched.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
{
	unsigned int err = 0;
	unsigned long long current_sr, new_sr;
#define SR_MASK 0xffff8cfd

#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)

	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);

	/* Prevent the signal handler manipulating SR in a way that can
	   crash the kernel.  i.e. only allow S, Q, M, PR, SZ, FR to be
	   modified */
	current_sr = regs->sr;
	err |= __get_user(new_sr, &sc->sc_sr);
	regs->sr &= SR_MASK;
	regs->sr |= (new_sr & ~SR_MASK);

	COPY(pc);

#undef COPY

	/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
	 * has been restored above.) */
	err |= restore_sigcontext_fpu(regs, sc);

	regs->syscall_nr = -1;	/* disable syscall checks */
	err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);

	return err;
}

/* sigreturn trampoline target: restore context saved by setup_frame(). */
asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
				unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs * regs)
{
	struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
	sigset_t set;
	long long ret;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->sc, &ret))
		goto badframe;
	regs->pc -= 4;	/* entry.S increments pc on the way out */

	return (int) ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

/* rt_sigreturn: like sys_sigreturn, but also restores the altstack state. */
asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
				unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs * regs)
{
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
	sigset_t set;
	/* NOTE(review): __user on an on-kernel-stack local looks spurious */
	stack_t __user st;
	long long ret;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
		goto badframe;
	regs->pc -= 4;	/* entry.S increments pc on the way out */

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&st, NULL, REF_REG_SP);

	return (int) ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

/*
 * Set up a signal frame.
 */
/* Dump the full register file plus FPU state into a user sigcontext. */
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 unsigned long mask)
{
	int err = 0;

	/* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
	err |= setup_sigcontext_fpu(regs, sc);

#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)

	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
	COPY(sr);	COPY(pc);

#undef COPY

	err |= __put_user(mask, &sc->oldmask);

	return err;
}

/*
 * Determine which stack to use..
*/ static inline void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) { if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user *)((sp - frame_size) & -8ul); } void sa_default_restorer(void); /* See comments below */ void sa_default_rt_restorer(void); /* See comments below */ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int err = 0; int signal; frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); /* Give up earlier as i386, in case */ if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); } /* Give up earlier as i386, in case */ if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { /* * On SH5 all edited pointers are subject to NEFF */ DEREF_REG_PR = neff_sign_extend((unsigned long) ka->sa.sa_restorer | 0x1); } else { /* * Different approach on SH5. * . Endianness independent asm code gets placed in entry.S . * This is limited to four ASM instructions corresponding * to two long longs in size. * . err checking is done on the else branch only * . flush_icache_range() is called upon __put_user() only * . all edited pointers are subject to NEFF * . being code, linker turns ShMedia bit on, always * dereference index -1. 
*/ DEREF_REG_PR = neff_sign_extend((unsigned long) frame->retcode | 0x01); if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) goto give_sigsegv; /* Cohere the trampoline with the I-cache. */ flush_cache_sigtramp(DEREF_REG_PR-1); } /* * Set up registers for signal handler. * All edited pointers are subject to NEFF. */ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame); regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ /* FIXME: The glibc profiling support for SH-5 needs to be passed a sigcontext so it can retrieve the PC. At some point during 2003 the glibc support was changed to receive the sigcontext through the 2nd argument, but there are still versions of libc.so in use that use the 3rd argument. Until libc.so is stabilised, pass the sigcontext through both 2nd and 3rd arguments. */ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); set_fs(USER_DS); /* Broken %016Lx */ pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", signal, current->comm, current->pid, frame, regs->pc >> 32, regs->pc & 0xffffffff, DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; int signal; frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? 
current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); /* Give up earlier as i386, in case */ if (err) goto give_sigsegv; /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[REG_SP]), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Give up earlier as i386, in case */ if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { /* * On SH5 all edited pointers are subject to NEFF */ DEREF_REG_PR = neff_sign_extend((unsigned long) ka->sa.sa_restorer | 0x1); } else { /* * Different approach on SH5. * . Endianness independent asm code gets placed in entry.S . * This is limited to four ASM instructions corresponding * to two long longs in size. * . err checking is done on the else branch only * . flush_icache_range() is called upon __put_user() only * . all edited pointers are subject to NEFF * . being code, linker turns ShMedia bit on, always * dereference index -1. */ DEREF_REG_PR = neff_sign_extend((unsigned long) frame->retcode | 0x01); if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) goto give_sigsegv; /* Cohere the trampoline with the I-cache. */ flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); } /* * Set up registers for signal handler. * All edited pointers are subject to NEFF. 
*/ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame); regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); set_fs(USER_DS); pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", signal, current->comm, current->pid, frame, regs->pc >> 32, regs->pc & 0xffffffff, DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) { int ret; /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); if (ret == 0) block_sigmask(ka, sig); return ret; } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, 0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
Bilibox/Linux-3.0.X
fs/hpfs/dnode.c
7982
31600
/* * linux/fs/hpfs/dnode.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * handling directory dnode tree - adding, deleteing & searching for dirents */ #include "hpfs_fn.h" static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); int i = 1; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; i++; } printk("HPFS: get_pos: not_found\n"); return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; } void hpfs_add_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); int i = 0; loff_t **ppos; if (hpfs_inode->i_rddir_off) for (; hpfs_inode->i_rddir_off[i]; i++) if (hpfs_inode->i_rddir_off[i] == pos) return; if (!(i&0x0f)) { if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) { printk("HPFS: out of memory for position list\n"); return; } if (hpfs_inode->i_rddir_off) { memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t)); kfree(hpfs_inode->i_rddir_off); } hpfs_inode->i_rddir_off = ppos; } hpfs_inode->i_rddir_off[i] = pos; hpfs_inode->i_rddir_off[i + 1] = NULL; } void hpfs_del_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i, **j; if (!hpfs_inode->i_rddir_off) goto not_f; for (i = hpfs_inode->i_rddir_off; *i; i++) if (*i == pos) goto fnd; goto not_f; fnd: for (j = i + 1; *j; j++) ; *i = *(j - 1); *(j - 1) = NULL; if (j - 1 == hpfs_inode->i_rddir_off) { kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } return; not_f: /*printk("HPFS: warning: position pointer %p->%08x not found\n", pos, (int)*pos);*/ return; } static void for_all_poss(struct inode *inode, void (*f)(loff_t *, loff_t, loff_t), loff_t p1, loff_t p2) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i; if (!hpfs_inode->i_rddir_off) return; for (i = hpfs_inode->i_rddir_off; *i; i++) (*f)(*i, p1, p2); 
return; } static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t) { if (*p == f) *p = t; } /*void hpfs_hpfs_pos_substd(loff_t *p, loff_t f, loff_t t) { if ((*p & ~0x3f) == (f & ~0x3f)) *p = (t & ~0x3f) | (*p & 0x3f); }*/ static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) + c; if (n > 0x3f) printk("HPFS: hpfs_pos_ins: %08x + %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) - c; if (n < 1) printk("HPFS: hpfs_pos_ins: %08x - %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static struct hpfs_dirent *dnode_pre_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL, *deee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { deee = dee; dee = de; } return deee; } static struct hpfs_dirent *dnode_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { dee = de; } return dee; } static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno ptr) { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", le32_to_cpu(d->self), de_down_pointer(de)); return; } if (le16_to_cpu(de->length) != 32) { hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); return; } } if (ptr) { d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4); if (le32_to_cpu(d->first_free) > 2048) { hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 
4); return; } de->length = cpu_to_le16(36); de->down = 1; *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); } } /* Add an entry to dnode and don't care if it grows over 2048 bytes */ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, const unsigned char *name, unsigned namelen, secno down_ptr) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); unsigned d_size = de_size(namelen, down_ptr); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); return NULL; } if (c < 0) break; } memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } de->length = cpu_to_le16(d_size); de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size); return de; } /* Delete dirent and don't care about its subtree */ static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); return; } d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); dnode_secno dno = le32_to_cpu(d->self); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { dd->up = cpu_to_le32(dno); 
dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } hpfs_brelse4(&qbh); } } } /* Add an entry to dnode and do dnode splitting if required */ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, dnode_secno down_ptr) { struct quad_buffer_head qbh, qbh1, qbh2; struct dnode *d, *ad, *rd, *nd = NULL; dnode_secno adno, rdno; struct hpfs_dirent *de; struct hpfs_dirent nde; unsigned char *nname; int h; int pos; struct buffer_head *bh; struct fnode *fnode; int c1, c2 = 0; if (!(nname = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't add to dnode\n"); return 1; } go_up: if (namelen >= 256) { hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen); kfree(nd); kfree(nname); return 1; } if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { kfree(nd); kfree(nname); return 1; } go_up_a: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = get_pos(d, de); for_all_poss(i, hpfs_pos_ins, t, 1); for_all_poss(i, hpfs_pos_subst, 4, t); for_all_poss(i, hpfs_pos_subst, 5, t + 1); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 0; } if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) { /* 0x924 is a max size of dnode after adding a dirent with max name length. We alloc this only once. There must not be any error while splitting dnodes, otherwise the whole directory, not only file we're adding, would be lost. 
*/ printk("HPFS: out of memory for dnode splitting\n"); hpfs_brelse4(&qbh); kfree(nname); return 1; } memcpy(nd, d, le32_to_cpu(d->first_free)); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; pos = 1; for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) { copy_de(hpfs_add_de(i->i_sb, ad, de->name, de->namelen, de->down ? de_down_pointer(de) : 0), de); for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, ((loff_t)adno << 4) | pos); pos++; } copy_de(new_de = &nde, de); memcpy(nname, de->name, de->namelen); name = nname; namelen = de->namelen; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? 
de_down_pointer(de) : 0); de = de_next_de(de); memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20)); memcpy(d, nd, le32_to_cpu(nd->first_free)); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { ad->up = d->up; dno = le32_to_cpu(ad->up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); hpfs_brelse4(&qbh2); kfree(nd); kfree(nname); return 1; } fnode->u.external[0].disk_secno = cpu_to_le32(rdno); mark_buffer_dirty(bh); brelse(bh); hpfs_i(i)->i_dno = rdno; d->up = ad->up = cpu_to_le32(rdno); d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); qbh = qbh2; set_last_pointer(i->i_sb, rd, dno); dno = rdno; d = rd; goto go_up_a; } /* * Add an entry to directory btree. * I hate such crazy directory structure. * It's easy to read but terrible to write. * I wrote this directory code 4 times. * I hope, now it's finally bug-free. 
*/ int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; struct hpfs_dirent *de, *de_end; struct quad_buffer_head qbh; dnode_secno dno; int c; int c1, c2 = 0; dno = hpfs_inode->i_dno; down: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_dirent")) return 1; if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 1; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (!(c = hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) { hpfs_brelse4(&qbh); return -1; } if (c < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto down; } break; } } hpfs_brelse4(&qbh); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; } i->i_version++; c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: return c; } /* * Find dirent with higher name in 'from' subtree and move it to 'to' dnode. 
* Return the dnode we moved from (to be checked later if it's empty) */ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) { dnode_secno dno, ddno; dnode_secno chk_up = to; struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de, *nde; int a; loff_t t; int c1, c2 = 0; dno = from; while (1) { if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "move_to_top")) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { if (le32_to_cpu(dnode->up) != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", dno, chk_up, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh); return 0; } chk_up = dno; } if (!(de = dnode_last_de(dnode))) { hpfs_error(i->i_sb, "move_to_top: dnode %08x has no last de", dno); hpfs_brelse4(&qbh); return 0; } if (!de->down) break; dno = de_down_pointer(de); hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { dnode_secno up = le32_to_cpu(dnode->up); hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, 5); if (up == to) return to; if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return 0; if (dnode->root_dnode) { hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to); hpfs_brelse4(&qbh); return 0; } de = dnode_last_de(dnode); if (!de || !de->down) { hpfs_error(i->i_sb, "move_to_top: dnode %08x doesn't point down to %08x", up, dno); hpfs_brelse4(&qbh); return 0; } dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); de->down = 0; hpfs_mark_4buffers_dirty(&qbh); dno = up; } t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be 
corrupted"); hpfs_brelse4(&qbh); return 0; } memcpy(nde, de, le16_to_cpu(de->length)); ddno = de->down ? de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from); kfree(nde); if (a) return 0; return dno; } /* * Check if a dnode is empty and delete it from the tree * (chkdsk doesn't like empty dnodes) */ static void delete_empty_dnode(struct inode *i, dnode_secno dno) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct quad_buffer_head qbh; struct dnode *dnode; dnode_secno down, up, ndown; int p; struct hpfs_dirent *de; int c1, c2 = 0; try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; if (le32_to_cpu(dnode->first_free) > 56) goto end; if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; up = le32_to_cpu(dnode->up); de = dnode_first_de(dnode); down = de->down ? 
de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { hpfs_error(i->i_sb, "delete_empty_dnode: root dnode %08x is empty", dno); goto end; } hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; if (root) { struct fnode *fnode; struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", dno, up, (unsigned long)i->i_ino); return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { fnode->u.external[0].disk_secno = cpu_to_le32(down); mark_buffer_dirty(bh); brelse(bh); } hpfs_inode->i_dno = down; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, (loff_t) 12); return; } if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return; p = 1; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++) if (de->down) if (de_down_pointer(de) == dno) goto fnd; hpfs_error(i->i_sb, "delete_empty_dnode: pointer to dnode %08x not found in dnode %08x", dno, up); goto end; fnd: for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); memmove(de_next_de(de), (char *)de_next_de(de) + 4, (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, 
le32_to_cpu(dnode->first_free)); goto end; } if (!de->last) { struct hpfs_dirent *de_next = de_next_de(de); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); goto endm; } memcpy(de_cp, de, le16_to_cpu(de->length)); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, de_cp->down ? de_down_pointer(de_cp) : 0); /*printk("UP-TO-DNODE: %08x (ndown = %08x, down = %08x, dno = %08x)\n", up, ndown, down, dno);*/ dno = up; kfree(de_cp); goto try_it_again; } else { struct hpfs_dirent *de_prev = dnode_pre_last_de(dnode); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; dnode_secno dlp; if (!de_prev) { hpfs_error(i->i_sb, "delete_empty_dnode: empty dnode %08x", up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); dno = up; goto try_it_again; } if (!de_prev->down) goto endm; ndown = de_down_pointer(de_prev); if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) { struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ? 
de_down_pointer(del) : 0; if (!dlp && down) { if (le32_to_cpu(d1->first_free) > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: terminating balancing operation\n"); } hpfs_brelse4(&qbh1); goto endm; } if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: goin'on\n"); } del->length = cpu_to_le16(le16_to_cpu(del->length) + 4); del->down = 1; d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4); } if (dlp && !down) { del->length = cpu_to_le16(le16_to_cpu(del->length) - 4); del->down = 0; d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); } else if (down) *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4); de_prev->down = 1; dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); } *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, dlp); dno = up; kfree(de_cp); goto try_it_again; } endm: hpfs_mark_4buffers_dirty(&qbh); end: hpfs_brelse4(&qbh); } /* Delete dirent from 
directory */ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, struct quad_buffer_head *qbh, int depth) { struct dnode *dnode = qbh->data; dnode_secno down = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); hpfs_brelse4(qbh); return 1; } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); return 2; } } i->i_version++; for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(qbh); hpfs_brelse4(qbh); if (down) { dnode_secno a = move_to_top(i, down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); return !a; } delete_empty_dnode(i, dno); return 0; } void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, int *n_subdirs, int *n_items) { struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de; dnode_secno ptr, odno = 0; int c1, c2 = 0; int d1, d2 = 0; go_down: if (n_dnodes) (*n_dnodes)++; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "hpfs_count_dnodes #1")) return; ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; if (de->last) { hpfs_brelse4(&qbh); hpfs_error(s, "hpfs_count_dnodes: pointer to dnode %08x not found in dnode %08x, got here from %08x", ptr, dno, odno); return; } de = de_next_de(de); } next_de: if (de->down) { odno = dno; dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto go_down; } process_de: if 
(!de->first && !de->last && de->directory && n_subdirs) (*n_subdirs)++; if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; ptr = dno; dno = le32_to_cpu(dnode->up); if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; } hpfs_brelse4(&qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return; odno = -1; goto go_up; } static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n, struct quad_buffer_head *qbh, struct dnode **dn) { int i; struct hpfs_dirent *de, *de_end; struct dnode *dnode; dnode = hpfs_map_dnode(s, dno, qbh); if (!dnode) return NULL; if (dn) *dn=dnode; de = dnode_first_de(dnode); de_end = dnode_end_de(dnode); for (i = 1; de < de_end; i++, de = de_next_de(de)) { if (i == n) { return de; } if (de->last) break; } hpfs_brelse4(qbh); hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n); return NULL; } dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; dnode_secno d = dno; dnode_secno up = 0; struct hpfs_dirent *de; int c1, c2 = 0; again: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, d, &c1, &c2, "hpfs_de_as_down_as_possible")) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); if (!de->down) { hpfs_brelse4(&qbh); return d; } up = d; d = de_down_pointer(de); hpfs_brelse4(&qbh); goto again; } struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, struct quad_buffer_head *qbh) { loff_t pos; unsigned c; dnode_secno dno; struct hpfs_dirent *de, *d; struct hpfs_dirent *up_de; struct hpfs_dirent *end_up_de; struct dnode *dnode; struct dnode *up_dnode; struct quad_buffer_head 
qbh0; pos = *posp; dno = pos >> 6 << 2; pos &= 077; if (!(de = map_nth_dirent(inode->i_sb, dno, pos, qbh, &dnode))) goto bail; /* Going to the next dirent */ if ((d = de_next_de(de)) < dnode_end_de(dnode)) { if (!(++*posp & 077)) { hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", (unsigned long long)*posp); goto bail; } /* We're going down the tree */ if (d->down) { *posp = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, de_down_pointer(d)) << 4) + 1; } return de; } /* Going up */ if (dnode->root_dnode) goto bail; if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); c = 0; for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); if (up_de->down && de_down_pointer(up_de) == dno) { *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", dno, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh0); bail: *posp = 12; return de; } /* Find a dirent in tree */ struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, const unsigned char *name, unsigned len, dnode_secno *dd, struct quad_buffer_head *qbh) { struct dnode *dnode; struct hpfs_dirent *de; struct hpfs_dirent *de_end; int c1, c2 = 0; if (!S_ISDIR(inode->i_mode)) hpfs_error(inode->i_sb, "map_dirent: not a directory\n"); again: if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, dno, &c1, &c2, "map_dirent")) return NULL; if (!(dnode = hpfs_map_dnode(inode->i_sb, dno, qbh))) return NULL; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de)) { int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last); if (!t) { if (dd) *dd = dno; return de; } if 
(t < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); goto again; } break; } } hpfs_brelse4(qbh); return NULL; } /* * Remove empty directory. In normal cases it is only one dnode with two * entries, but we must handle also such obscure cases when it's a tree * of empty dnodes. */ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; struct dnode *dnode; struct hpfs_dirent *de; dnode_secno d1, d2, rdno = dno; while (1) { if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; de = dnode_first_de(dnode); if (de->last) { if (de->down) d1 = de_down_pointer(de); else goto error; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); dno = d1; } else break; } if (!de->first) goto error; d1 = de->down ? de_down_pointer(de) : 0; de = de_next_de(de); if (!de->last) goto error; d2 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); do { while (d1) { if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return; de = dnode_first_de(dnode); if (!de->last) goto error; d1 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); } d1 = d2; d2 = 0; } while (d1); return; error: hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); hpfs_error(s, "directory %08x is corrupted or not empty", rdno); } /* * Find dirent for specified fnode. Use truncated 15-char name in fnode as * a help for searching. 
*/ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, struct fnode *f, struct quad_buffer_head *qbh) { unsigned char *name1; unsigned char *name2; int name1len, name2len; struct dnode *d; dnode_secno dno, downd; struct fnode *upf; struct buffer_head *bh; struct hpfs_dirent *de, *de_end; int c; int c1, c2 = 0; int d1, d2 = 0; name1 = f->name; if (!(name2 = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't map dirent\n"); return NULL; } if (f->len <= 15) memcpy(name2, name1, name1len = name2len = f->len); else { memcpy(name2, name1, 15); memset(name2 + 15, 0xff, 256 - 15); /*name2[15] = 0xff;*/ name1len = 15; name2len = 256; } if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { kfree(name2); return NULL; } if (!upf->dirflag) { brelse(bh); hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); return NULL; } dno = le32_to_cpu(upf->u.external[0].disk_secno); brelse(bh); go_down: downd = 0; go_up: if (!(d = hpfs_map_dnode(s, dno, qbh))) { kfree(name2); return NULL; } de_end = dnode_end_de(d); de = dnode_first_de(d); if (downd) { while (de < de_end) { if (de->down) if (de_down_pointer(de) == downd) goto f; de = de_next_de(de); } hpfs_error(s, "pointer to dnode %08x not found in dnode %08x", downd, dno); hpfs_brelse4(qbh); kfree(name2); return NULL; } next_de: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last); if (c < 0 && de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "map_fnode_dirent #1")) { kfree(name2); return NULL; } goto go_down; } f: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last); if (c < 0 && !de->last) goto not_found; if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; dno = 
le32_to_cpu(d->up); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { kfree(name2); return NULL; } goto go_up; not_found: hpfs_brelse4(qbh); hpfs_error(s, "dirent for fnode %08x not found", fno); kfree(name2); return NULL; }
gpl-2.0
talnoah/m8
drivers/media/radio/wl128x/fmdrv_tx.c
7982
9306
/* * FM Driver for Connectivity chip of Texas Instruments. * This sub-module of FM driver implements FM TX functionality. * * Copyright (C) 2011 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include "fmdrv.h" #include "fmdrv_common.h" #include "fmdrv_tx.h" int fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode) { u16 payload; int ret; if (fmdev->tx_data.aud_mode == mode) return 0; fmdbg("stereo mode: %d\n", mode); /* Set Stereo/Mono mode */ payload = (1 - mode); ret = fmc_send_cmd(fmdev, MONO_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; fmdev->tx_data.aud_mode = mode; return ret; } static int set_rds_text(struct fmdev *fmdev, u8 *rds_text) { u16 payload; int ret; ret = fmc_send_cmd(fmdev, RDS_DATA_SET, REG_WR, rds_text, strlen(rds_text), NULL, NULL); if (ret < 0) return ret; /* Scroll mode */ payload = (u16)0x1; ret = fmc_send_cmd(fmdev, DISPLAY_MODE, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } static int set_rds_data_mode(struct fmdev *fmdev, u8 mode) { u16 payload; int ret; /* Setting unique PI TODO: how unique? 
*/ payload = (u16)0xcafe; ret = fmc_send_cmd(fmdev, PI_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* Set decoder id */ payload = (u16)0xa; ret = fmc_send_cmd(fmdev, DI_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: RDS_MODE_GET? */ return 0; } static int set_rds_len(struct fmdev *fmdev, u8 type, u16 len) { u16 payload; int ret; len |= type << 8; payload = len; ret = fmc_send_cmd(fmdev, RDS_CONFIG_DATA_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: LENGTH_GET? */ return 0; } int fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis) { u16 payload; int ret; u8 rds_text[] = "Zoom2\n"; fmdbg("rds_en_dis:%d(E:%d, D:%d)\n", rds_en_dis, FM_RDS_ENABLE, FM_RDS_DISABLE); if (rds_en_dis == FM_RDS_ENABLE) { /* Set RDS length */ set_rds_len(fmdev, 0, strlen(rds_text)); /* Set RDS text */ set_rds_text(fmdev, rds_text); /* Set RDS mode */ set_rds_data_mode(fmdev, 0x0); } /* Send command to enable RDS */ if (rds_en_dis == FM_RDS_ENABLE) payload = 0x01; else payload = 0x00; ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; if (rds_en_dis == FM_RDS_ENABLE) { /* Set RDS length */ set_rds_len(fmdev, 0, strlen(rds_text)); /* Set RDS text */ set_rds_text(fmdev, rds_text); } fmdev->tx_data.rds.flag = rds_en_dis; return 0; } int fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type) { u16 payload; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; fm_tx_set_rds_mode(fmdev, 0); /* Set RDS length */ set_rds_len(fmdev, rds_type, strlen(rds_text)); /* Set RDS text */ set_rds_text(fmdev, rds_text); /* Set RDS mode */ set_rds_data_mode(fmdev, 0x0); payload = 1; ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } int fm_tx_set_af(struct fmdev *fmdev, u32 af) { u16 payload; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) 
return -EPERM; fmdbg("AF: %d\n", af); af = (af - 87500) / 100; payload = (u16)af; ret = fmc_send_cmd(fmdev, TA_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } int fm_tx_set_region(struct fmdev *fmdev, u8 region) { u16 payload; int ret; if (region != FM_BAND_EUROPE_US && region != FM_BAND_JAPAN) { fmerr("Invalid band\n"); return -EINVAL; } /* Send command to set the band */ payload = (u16)region; ret = fmc_send_cmd(fmdev, TX_BAND_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } int fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset) { u16 payload; int ret; fmdbg("tx: mute mode %d\n", mute_mode_toset); payload = mute_mode_toset; ret = fmc_send_cmd(fmdev, MUTE, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } /* Set TX Audio I/O */ static int set_audio_io(struct fmdev *fmdev) { struct fmtx_data *tx = &fmdev->tx_data; u16 payload; int ret; /* Set Audio I/O Enable */ payload = tx->audio_io; ret = fmc_send_cmd(fmdev, AUDIO_IO_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: is audio set? 
*/ return 0; } /* Start TX Transmission */ static int enable_xmit(struct fmdev *fmdev, u8 new_xmit_state) { struct fmtx_data *tx = &fmdev->tx_data; unsigned long timeleft; u16 payload; int ret; /* Enable POWER_ENB interrupts */ payload = FM_POW_ENB_EVENT; ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* Set Power Enable */ payload = new_xmit_state; ret = fmc_send_cmd(fmdev, POWER_ENB_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* Wait for Power Enabled */ init_completion(&fmdev->maintask_comp); timeleft = wait_for_completion_timeout(&fmdev->maintask_comp, FM_DRV_TX_TIMEOUT); if (!timeleft) { fmerr("Timeout(%d sec),didn't get tune ended interrupt\n", jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000); return -ETIMEDOUT; } set_bit(FM_CORE_TX_XMITING, &fmdev->flag); tx->xmit_state = new_xmit_state; return 0; } /* Set TX power level */ int fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl) { u16 payload; struct fmtx_data *tx = &fmdev->tx_data; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; fmdbg("tx: pwr_level_to_set %ld\n", (long int)new_pwr_lvl); /* If the core isn't ready update global variable */ if (!test_bit(FM_CORE_READY, &fmdev->flag)) { tx->pwr_lvl = new_pwr_lvl; return 0; } /* Set power level: Application will specify power level value in * units of dB/uV, whereas range and step are specific to FM chip. * For TI's WL chips, convert application specified power level value * to chip specific value by subtracting 122 from it. Refer to TI FM * data sheet for details. * */ payload = (FM_PWR_LVL_HIGH - new_pwr_lvl); ret = fmc_send_cmd(fmdev, POWER_LEV_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: is the power level set? */ tx->pwr_lvl = new_pwr_lvl; return 0; } /* * Sets FM TX pre-emphasis filter value (OFF, 50us, or 75us) * Convert V4L2 specified filter values to chip specific filter values. 
*/ int fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis) { struct fmtx_data *tx = &fmdev->tx_data; u16 payload; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; switch (preemphasis) { case V4L2_PREEMPHASIS_DISABLED: payload = FM_TX_PREEMPH_OFF; break; case V4L2_PREEMPHASIS_50_uS: payload = FM_TX_PREEMPH_50US; break; case V4L2_PREEMPHASIS_75_uS: payload = FM_TX_PREEMPH_75US; break; } ret = fmc_send_cmd(fmdev, PREMPH_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; tx->preemph = payload; return ret; } /* Get the TX tuning capacitor value.*/ int fm_tx_get_tune_cap_val(struct fmdev *fmdev) { u16 curr_val; u32 resp_len; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; ret = fmc_send_cmd(fmdev, READ_FMANT_TUNE_VALUE, REG_RD, NULL, sizeof(curr_val), &curr_val, &resp_len); if (ret < 0) return ret; curr_val = be16_to_cpu(curr_val); return curr_val; } /* Set TX Frequency */ int fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set) { struct fmtx_data *tx = &fmdev->tx_data; u16 payload, chanl_index; int ret; if (test_bit(FM_CORE_TX_XMITING, &fmdev->flag)) { enable_xmit(fmdev, 0); clear_bit(FM_CORE_TX_XMITING, &fmdev->flag); } /* Enable FR, BL interrupts */ payload = (FM_FR_EVENT | FM_BL_EVENT); ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; tx->tx_frq = (unsigned long)freq_to_set; fmdbg("tx: freq_to_set %ld\n", (long int)tx->tx_frq); chanl_index = freq_to_set / 10; /* Set current tuner channel */ payload = chanl_index; ret = fmc_send_cmd(fmdev, CHANL_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; fm_tx_set_pwr_lvl(fmdev, tx->pwr_lvl); fm_tx_set_preemph_filter(fmdev, tx->preemph); tx->audio_io = 0x01; /* I2S */ set_audio_io(fmdev); enable_xmit(fmdev, 0x01); /* Enable transmission */ tx->aud_mode = FM_STEREO_MODE; tx->rds.flag = FM_RDS_DISABLE; return 0; }
gpl-2.0
pierdebeer/AudaxPlus_Kernel
drivers/media/radio/wl128x/fmdrv_tx.c
7982
9306
/* * FM Driver for Connectivity chip of Texas Instruments. * This sub-module of FM driver implements FM TX functionality. * * Copyright (C) 2011 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include "fmdrv.h" #include "fmdrv_common.h" #include "fmdrv_tx.h" int fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode) { u16 payload; int ret; if (fmdev->tx_data.aud_mode == mode) return 0; fmdbg("stereo mode: %d\n", mode); /* Set Stereo/Mono mode */ payload = (1 - mode); ret = fmc_send_cmd(fmdev, MONO_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; fmdev->tx_data.aud_mode = mode; return ret; } static int set_rds_text(struct fmdev *fmdev, u8 *rds_text) { u16 payload; int ret; ret = fmc_send_cmd(fmdev, RDS_DATA_SET, REG_WR, rds_text, strlen(rds_text), NULL, NULL); if (ret < 0) return ret; /* Scroll mode */ payload = (u16)0x1; ret = fmc_send_cmd(fmdev, DISPLAY_MODE, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } static int set_rds_data_mode(struct fmdev *fmdev, u8 mode) { u16 payload; int ret; /* Setting unique PI TODO: how unique? 
*/ payload = (u16)0xcafe; ret = fmc_send_cmd(fmdev, PI_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* Set decoder id */ payload = (u16)0xa; ret = fmc_send_cmd(fmdev, DI_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: RDS_MODE_GET? */ return 0; } static int set_rds_len(struct fmdev *fmdev, u8 type, u16 len) { u16 payload; int ret; len |= type << 8; payload = len; ret = fmc_send_cmd(fmdev, RDS_CONFIG_DATA_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: LENGTH_GET? */ return 0; } int fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis) { u16 payload; int ret; u8 rds_text[] = "Zoom2\n"; fmdbg("rds_en_dis:%d(E:%d, D:%d)\n", rds_en_dis, FM_RDS_ENABLE, FM_RDS_DISABLE); if (rds_en_dis == FM_RDS_ENABLE) { /* Set RDS length */ set_rds_len(fmdev, 0, strlen(rds_text)); /* Set RDS text */ set_rds_text(fmdev, rds_text); /* Set RDS mode */ set_rds_data_mode(fmdev, 0x0); } /* Send command to enable RDS */ if (rds_en_dis == FM_RDS_ENABLE) payload = 0x01; else payload = 0x00; ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; if (rds_en_dis == FM_RDS_ENABLE) { /* Set RDS length */ set_rds_len(fmdev, 0, strlen(rds_text)); /* Set RDS text */ set_rds_text(fmdev, rds_text); } fmdev->tx_data.rds.flag = rds_en_dis; return 0; } int fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type) { u16 payload; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; fm_tx_set_rds_mode(fmdev, 0); /* Set RDS length */ set_rds_len(fmdev, rds_type, strlen(rds_text)); /* Set RDS text */ set_rds_text(fmdev, rds_text); /* Set RDS mode */ set_rds_data_mode(fmdev, 0x0); payload = 1; ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } int fm_tx_set_af(struct fmdev *fmdev, u32 af) { u16 payload; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) 
return -EPERM; fmdbg("AF: %d\n", af); af = (af - 87500) / 100; payload = (u16)af; ret = fmc_send_cmd(fmdev, TA_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } int fm_tx_set_region(struct fmdev *fmdev, u8 region) { u16 payload; int ret; if (region != FM_BAND_EUROPE_US && region != FM_BAND_JAPAN) { fmerr("Invalid band\n"); return -EINVAL; } /* Send command to set the band */ payload = (u16)region; ret = fmc_send_cmd(fmdev, TX_BAND_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } int fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset) { u16 payload; int ret; fmdbg("tx: mute mode %d\n", mute_mode_toset); payload = mute_mode_toset; ret = fmc_send_cmd(fmdev, MUTE, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; return 0; } /* Set TX Audio I/O */ static int set_audio_io(struct fmdev *fmdev) { struct fmtx_data *tx = &fmdev->tx_data; u16 payload; int ret; /* Set Audio I/O Enable */ payload = tx->audio_io; ret = fmc_send_cmd(fmdev, AUDIO_IO_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: is audio set? 
*/ return 0; } /* Start TX Transmission */ static int enable_xmit(struct fmdev *fmdev, u8 new_xmit_state) { struct fmtx_data *tx = &fmdev->tx_data; unsigned long timeleft; u16 payload; int ret; /* Enable POWER_ENB interrupts */ payload = FM_POW_ENB_EVENT; ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* Set Power Enable */ payload = new_xmit_state; ret = fmc_send_cmd(fmdev, POWER_ENB_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* Wait for Power Enabled */ init_completion(&fmdev->maintask_comp); timeleft = wait_for_completion_timeout(&fmdev->maintask_comp, FM_DRV_TX_TIMEOUT); if (!timeleft) { fmerr("Timeout(%d sec),didn't get tune ended interrupt\n", jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000); return -ETIMEDOUT; } set_bit(FM_CORE_TX_XMITING, &fmdev->flag); tx->xmit_state = new_xmit_state; return 0; } /* Set TX power level */ int fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl) { u16 payload; struct fmtx_data *tx = &fmdev->tx_data; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; fmdbg("tx: pwr_level_to_set %ld\n", (long int)new_pwr_lvl); /* If the core isn't ready update global variable */ if (!test_bit(FM_CORE_READY, &fmdev->flag)) { tx->pwr_lvl = new_pwr_lvl; return 0; } /* Set power level: Application will specify power level value in * units of dB/uV, whereas range and step are specific to FM chip. * For TI's WL chips, convert application specified power level value * to chip specific value by subtracting 122 from it. Refer to TI FM * data sheet for details. * */ payload = (FM_PWR_LVL_HIGH - new_pwr_lvl); ret = fmc_send_cmd(fmdev, POWER_LEV_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; /* TODO: is the power level set? */ tx->pwr_lvl = new_pwr_lvl; return 0; } /* * Sets FM TX pre-emphasis filter value (OFF, 50us, or 75us) * Convert V4L2 specified filter values to chip specific filter values. 
*/ int fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis) { struct fmtx_data *tx = &fmdev->tx_data; u16 payload; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; switch (preemphasis) { case V4L2_PREEMPHASIS_DISABLED: payload = FM_TX_PREEMPH_OFF; break; case V4L2_PREEMPHASIS_50_uS: payload = FM_TX_PREEMPH_50US; break; case V4L2_PREEMPHASIS_75_uS: payload = FM_TX_PREEMPH_75US; break; } ret = fmc_send_cmd(fmdev, PREMPH_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; tx->preemph = payload; return ret; } /* Get the TX tuning capacitor value.*/ int fm_tx_get_tune_cap_val(struct fmdev *fmdev) { u16 curr_val; u32 resp_len; int ret; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; ret = fmc_send_cmd(fmdev, READ_FMANT_TUNE_VALUE, REG_RD, NULL, sizeof(curr_val), &curr_val, &resp_len); if (ret < 0) return ret; curr_val = be16_to_cpu(curr_val); return curr_val; } /* Set TX Frequency */ int fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set) { struct fmtx_data *tx = &fmdev->tx_data; u16 payload, chanl_index; int ret; if (test_bit(FM_CORE_TX_XMITING, &fmdev->flag)) { enable_xmit(fmdev, 0); clear_bit(FM_CORE_TX_XMITING, &fmdev->flag); } /* Enable FR, BL interrupts */ payload = (FM_FR_EVENT | FM_BL_EVENT); ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; tx->tx_frq = (unsigned long)freq_to_set; fmdbg("tx: freq_to_set %ld\n", (long int)tx->tx_frq); chanl_index = freq_to_set / 10; /* Set current tuner channel */ payload = chanl_index; ret = fmc_send_cmd(fmdev, CHANL_SET, REG_WR, &payload, sizeof(payload), NULL, NULL); if (ret < 0) return ret; fm_tx_set_pwr_lvl(fmdev, tx->pwr_lvl); fm_tx_set_preemph_filter(fmdev, tx->preemph); tx->audio_io = 0x01; /* I2S */ set_audio_io(fmdev); enable_xmit(fmdev, 0x01); /* Enable transmission */ tx->aud_mode = FM_STEREO_MODE; tx->rds.flag = FM_RDS_DISABLE; return 0; }
gpl-2.0
CyanogenMod/android_kernel_samsung_jf
arch/blackfin/mach-common/clocks-init.c
8494
2546
/* * arch/blackfin/mach-common/clocks-init.c - reprogram clocks / memory * * Copyright 2004-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/blackfin.h> #include <asm/dma.h> #include <asm/clocks.h> #include <asm/mem_init.h> #include <asm/dpmc.h> #define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */ #define PLL_CTL_VAL \ (((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \ (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000)) __attribute__((l1_text)) static void do_sync(void) { __builtin_bfin_ssync(); } __attribute__((l1_text)) void init_clocks(void) { /* Kill any active DMAs as they may trigger external memory accesses * in the middle of reprogramming things, and that'll screw us up. * For example, any automatic DMAs left by U-Boot for splash screens. */ size_t i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) { struct dma_register *dma = dma_io_base_addr[i]; dma->cfg = 0; } do_sync(); #ifdef SIC_IWR0 bfin_write_SIC_IWR0(IWR_ENABLE(0)); # ifdef SIC_IWR1 /* BF52x system reset does not properly reset SIC_IWR1 which * will screw up the bootrom as it relies on MDMA0/1 waking it * up from IDLE instructions. 
See this report for more info: * http://blackfin.uclinux.org/gf/tracker/4323 */ if (ANOMALY_05000435) bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); else bfin_write_SIC_IWR1(IWR_DISABLE_ALL); # endif # ifdef SIC_IWR2 bfin_write_SIC_IWR2(IWR_DISABLE_ALL); # endif #else bfin_write_SIC_IWR(IWR_ENABLE(0)); #endif do_sync(); #ifdef EBIU_SDGCTL bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS); do_sync(); #endif #ifdef CLKBUFOE bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE); do_sync(); __asm__ __volatile__("IDLE;"); #endif bfin_write_PLL_LOCKCNT(0x300); do_sync(); /* We always write PLL_CTL thus avoiding Anomaly 05000242 */ bfin_write16(PLL_CTL, PLL_CTL_VAL); __asm__ __volatile__("IDLE;"); bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV); #ifdef EBIU_SDGCTL bfin_write_EBIU_SDRRC(mem_SDRRC); bfin_write_EBIU_SDGCTL((bfin_read_EBIU_SDGCTL() & SDGCTL_WIDTH) | mem_SDGCTL); #else bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ)); do_sync(); bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1); bfin_write_EBIU_DDRCTL0(mem_DDRCTL0); bfin_write_EBIU_DDRCTL1(mem_DDRCTL1); bfin_write_EBIU_DDRCTL2(mem_DDRCTL2); #ifdef CONFIG_MEM_EBIU_DDRQUE bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE); #endif #endif do_sync(); bfin_read16(0); }
gpl-2.0
bedalus/nexus4
drivers/net/wireless/rtl818x/rtl8180/grf5101.c
8494
5101
/* * Radio tuning for GCT GRF5101 on RTL8180 * * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things * * I want to thanks the Authors of such projects and the Ndiswrapper * project Authors. * * A special Big Thanks also is for all people who donated me cards, * making possible the creation of the original rtl8180 driver * from which this code is derived! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/delay.h> #include <net/mac80211.h> #include "rtl8180.h" #include "grf5101.h" static const int grf5101_encode[] = { 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF }; static void write_grf5101(struct ieee80211_hw *dev, u8 addr, u32 data) { struct rtl8180_priv *priv = dev->priv; u32 phy_config; phy_config = grf5101_encode[(data >> 8) & 0xF]; phy_config |= grf5101_encode[(data >> 4) & 0xF] << 4; phy_config |= grf5101_encode[data & 0xF] << 8; phy_config |= grf5101_encode[(addr >> 1) & 0xF] << 12; phy_config |= (addr & 1) << 16; phy_config |= grf5101_encode[(data & 0xf000) >> 12] << 24; /* MAC will bang bits to the chip */ phy_config |= 0x90000000; rtl818x_iowrite32(priv, (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); msleep(3); } static void grf5101_write_phy_antenna(struct ieee80211_hw *dev, short chan) { struct rtl8180_priv *priv = dev->priv; u8 ant = GRF5101_ANTENNA; if (priv->rfparam & RF_PARAM_ANTBDEFAULT) ant |= BB_ANTENNA_B; if (chan == 14) ant |= BB_ANTATTEN_CHAN14; rtl8180_write_phy(dev, 0x10, ant); } static u8 grf5101_rf_calc_rssi(u8 agc, u8 sq) { if (agc > 60) return 65; /* TODO(?): just return agc (or agc + 5) to avoid mult / div */ return 65 * agc / 60; } static void grf5101_rf_set_channel(struct ieee80211_hw 
*dev, struct ieee80211_conf *conf) { struct rtl8180_priv *priv = dev->priv; int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); u32 txpw = priv->channels[channel - 1].hw_value & 0xFF; u32 chan = channel - 1; /* set TX power */ write_grf5101(dev, 0x15, 0x0); write_grf5101(dev, 0x06, txpw); write_grf5101(dev, 0x15, 0x10); write_grf5101(dev, 0x15, 0x0); /* set frequency */ write_grf5101(dev, 0x07, 0x0); write_grf5101(dev, 0x0B, chan); write_grf5101(dev, 0x07, 0x1000); grf5101_write_phy_antenna(dev, channel); } static void grf5101_rf_stop(struct ieee80211_hw *dev) { struct rtl8180_priv *priv = dev->priv; u32 anaparam; anaparam = priv->anaparam; anaparam &= 0x000fffff; anaparam |= 0x3f900000; rtl8180_set_anaparam(priv, anaparam); write_grf5101(dev, 0x07, 0x0); write_grf5101(dev, 0x1f, 0x45); write_grf5101(dev, 0x1f, 0x5); write_grf5101(dev, 0x00, 0x8e4); } static void grf5101_rf_init(struct ieee80211_hw *dev) { struct rtl8180_priv *priv = dev->priv; rtl8180_set_anaparam(priv, priv->anaparam); write_grf5101(dev, 0x1f, 0x0); write_grf5101(dev, 0x1f, 0x0); write_grf5101(dev, 0x1f, 0x40); write_grf5101(dev, 0x1f, 0x60); write_grf5101(dev, 0x1f, 0x61); write_grf5101(dev, 0x1f, 0x61); write_grf5101(dev, 0x00, 0xae4); write_grf5101(dev, 0x1f, 0x1); write_grf5101(dev, 0x1f, 0x41); write_grf5101(dev, 0x1f, 0x61); write_grf5101(dev, 0x01, 0x1a23); write_grf5101(dev, 0x02, 0x4971); write_grf5101(dev, 0x03, 0x41de); write_grf5101(dev, 0x04, 0x2d80); write_grf5101(dev, 0x05, 0x68ff); /* 0x61ff original value */ write_grf5101(dev, 0x06, 0x0); write_grf5101(dev, 0x07, 0x0); write_grf5101(dev, 0x08, 0x7533); write_grf5101(dev, 0x09, 0xc401); write_grf5101(dev, 0x0a, 0x0); write_grf5101(dev, 0x0c, 0x1c7); write_grf5101(dev, 0x0d, 0x29d3); write_grf5101(dev, 0x0e, 0x2e8); write_grf5101(dev, 0x10, 0x192); write_grf5101(dev, 0x11, 0x248); write_grf5101(dev, 0x12, 0x0); write_grf5101(dev, 0x13, 0x20c4); write_grf5101(dev, 0x14, 0xf4fc); write_grf5101(dev, 0x15, 0x0); 
write_grf5101(dev, 0x16, 0x1500); write_grf5101(dev, 0x07, 0x1000); /* baseband configuration */ rtl8180_write_phy(dev, 0, 0xa8); rtl8180_write_phy(dev, 3, 0x0); rtl8180_write_phy(dev, 4, 0xc0); rtl8180_write_phy(dev, 5, 0x90); rtl8180_write_phy(dev, 6, 0x1e); rtl8180_write_phy(dev, 7, 0x64); grf5101_write_phy_antenna(dev, 1); rtl8180_write_phy(dev, 0x11, 0x88); if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & RTL818X_CONFIG2_ANTENNA_DIV) rtl8180_write_phy(dev, 0x12, 0xc0); /* enable ant diversity */ else rtl8180_write_phy(dev, 0x12, 0x40); /* disable ant diversity */ rtl8180_write_phy(dev, 0x13, 0x90 | priv->csthreshold); rtl8180_write_phy(dev, 0x19, 0x0); rtl8180_write_phy(dev, 0x1a, 0xa0); rtl8180_write_phy(dev, 0x1b, 0x44); } const struct rtl818x_rf_ops grf5101_rf_ops = { .name = "GCT", .init = grf5101_rf_init, .stop = grf5101_rf_stop, .set_chan = grf5101_rf_set_channel, .calc_rssi = grf5101_rf_calc_rssi, };
gpl-2.0
liquidware/android-kernel-omap4
drivers/mtd/nand/nand_bcm_umi.c
9262
5019
/*****************************************************************************
* Copyright 2004 - 2009 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */
#include <mach/reg_umi.h>
#include "nand_bcm_umi.h"
#ifdef BOOT0_BUILD
#include <uart.h>
#endif

/* ---- External Variable Declarations ----------------------------------- */
/* ---- External Function Prototypes ------------------------------------- */
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
/* ---- Private Function Prototypes ------------------------------------- */
/* ---- Private Variables ------------------------------------------------ */
/* ---- Private Functions ----------------------------------------------- */

#if NAND_ECC_BCH
/****************************************************************************
* nand_bcm_umi_bch_ecc_flip_bit - Routine to flip an errored bit
*
* PURPOSE:
*   This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
*   errored bit specified
*
* PARAMETERS:
*   datap - Container that holds the 512 byte data
*   errorLocation - Location of the bit that needs to be flipped
*
* RETURNS:
*   None
****************************************************************************/
static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
{
	/*
	 * errorLocation is a packed bit address; the REG_UMI_BCH_ERR_LOC_*
	 * masks/shifts split it into: bit-within-byte, byte-within-32-bit-word,
	 * and word-within-512-byte-block (see how datap is indexed below).
	 */
	int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
	int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
	int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;

	uint8_t errorByte = 0;
	uint8_t byteMask = 1 << locWithinAByte;

	/* BCH uses big endian, need to change the location
	 * bits to little endian */
	locWithinAWord = 3 - locWithinAWord;

	/* datap is addressed in 4-byte strides: word index * 4 + byte index */
	errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];

#ifdef BOOT0_BUILD
	/* boot-loader build: trace the corrected offset over the UART */
	puthexs("\nECC Correct Offset: ",
		locWithinAPage * sizeof(uint32_t) + locWithinAWord);
	puthexs(" errorByte:", errorByte);
	puthex8(" Bit: ", locWithinAByte);
#endif

	if (errorByte & byteMask) {
		/* bit needs to be cleared */
		errorByte &= ~byteMask;
	} else {
		/* bit needs to be set */
		errorByte |= byteMask;
	}

	/* write back the value with the fixed bit */
	datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
}

/****************************************************************************
* nand_bcm_umi_bch_correct_page - Routine to correct bit errors when
*                                 reading NAND
*
* PURPOSE:
*   This routine reads the BCH registers to determine if there are any bit
*   errors during the read of the last 512 bytes of data + ECC bytes.  If
*   errors exist, the routine fixes them in place in @datap.
*
* PARAMETERS:
*   datap       - Container that holds the 512 byte data
*   readEccData - ECC bytes read from the spare area for this sector
*   numEccBytes - Number of valid bytes in readEccData
*
* RETURNS:
*   0 or greater = Number of errors corrected
*                  (No errors are found or errors have been fixed)
*   -1 = Error(s) cannot be fixed
****************************************************************************/
int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
				  int numEccBytes)
{
	int numErrors;
	int errorLocation;
	int idx;
	uint32_t regValue;

	/* wait for read ECC to be valid */
	regValue = nand_bcm_umi_bch_poll_read_ecc_calc();

	/*
	 * read the control status register to determine if there
	 * are error'ed bits
	 * see if errors are correctible
	 */
	if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
		int i;

		/*
		 * An uncorrectable error on an all-0xff ECC area means the
		 * page was never programmed, so the "error" is expected.
		 */
		for (i = 0; i < numEccBytes; i++) {
			if (readEccData[i] != 0xff) {
				/* errors cannot be fixed, return -1 */
				return -1;
			}
		}
		/* If ECC is unprogrammed then we can't correct,
		 * assume everything OK */
		return 0;
	}

	if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
		/* no errors */
		return 0;
	}

	/*
	 * Fix errored bits by doing the following:
	 * 1. Read the number of errors in the control and status register
	 * 2. Read the error location registers that corresponds to the number
	 *    of errors reported
	 * 3. Invert the bit in the data
	 */
	numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;

	for (idx = 0; idx < numErrors; idx++) {
		errorLocation =
		    REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;

		/* Flip bit */
		nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
	}
	/* Errors corrected */
	return numErrors;
}
#endif
gpl-2.0
RolanDroid/lge_MonsterKernel-JB-Stock
fs/dlm/requestqueue.c
9262
5389
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"

/* One saved message, queued on ls->ls_requestqueue.  The variable-length
   message body is allocated inline past the end of the struct. */
struct rq_entry {
	struct list_head list;
	int nodeid;		/* node the message arrived from */
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

/* Save a message received during recovery for later replay.  On allocation
   failure the message is silently dropped (only logged); the sender is
   expected to cope via its own recovery/resend paths. */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
	struct rq_entry *e;
	/* extra bytes beyond the fixed dlm_message header/body */
	int length = ms->m_header.h_length - sizeof(struct dlm_message);

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}

	e->nodeid = nodeid;
	/* copy the full wire message, including the variable tail */
	memcpy(&e->request, ms, ms->m_header.h_length);

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening.  Normal locking has been enabled before this is called.  dlm_recv
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got.  If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue.  In that
 * case, we don't abort since locking_stopped is still 0.  If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 *
 * Returns 0 when the queue is drained, -EINTR if aborted because locking
 * stopped again mid-drain.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		/* drop the mutex while delivering: the entry stays on the
		   list, so dlm_wait_requestqueue still sees a busy queue */
		mutex_unlock(&ls->ls_requestqueue_mutex);

		dlm_receive_message_saved(ls, &e->request);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv.  At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests.  We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	/* busy-wait (with schedule()) until the queue empties; exits the
	   loop while still holding the mutex, released below */
	for (;;) {
		mutex_lock(&ls->ls_requestqueue_mutex);
		if (list_empty(&ls->ls_requestqueue))
			break;
		mutex_unlock(&ls->ls_requestqueue_mutex);
		schedule();
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/* Decide whether a saved message should be discarded instead of replayed.
   Returns 1 to purge, 0 to keep.  Called with ls_requestqueue_mutex held
   (see dlm_purge_requestqueue). */

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!ls->ls_count)
		return 1;

	/* sender is no longer a member of the lockspace */
	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	/* with no directory, the master is likely to change as a part of
	   recovery; requests to/from the defunct master need to be purged */

	switch (type) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_CANCEL:
		/* we're no longer the master of this resource, the sender
		   will resend to the new master (see waiter_needs_recovery) */

		if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
			return 1;
		break;

	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_UNLOCK_REPLY:
	case DLM_MSG_CANCEL_REPLY:
	case DLM_MSG_GRANT:
		/* this reply is from the former master of the resource,
		   we'll resend to the new master if needed */

		if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
			return 1;
		break;
	}

	return 0;
}

/* Drop all saved messages that purge_request() deems stale, e.g. after a
   membership change made their senders or masters obsolete. */

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms =  &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}
gpl-2.0
ricardon/omap-audio
arch/arm/mach-mmp/clock.c
9774
2092
/* * linux/arch/arm/mach-mmp/clock.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/clk.h> #include <linux/io.h> #include <mach/regs-apbc.h> #include "clock.h" static void apbc_clk_enable(struct clk *clk) { uint32_t clk_rst; clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(clk->fnclksel); __raw_writel(clk_rst, clk->clk_rst); } static void apbc_clk_disable(struct clk *clk) { __raw_writel(0, clk->clk_rst); } struct clkops apbc_clk_ops = { .enable = apbc_clk_enable, .disable = apbc_clk_disable, }; static void apmu_clk_enable(struct clk *clk) { __raw_writel(clk->enable_val, clk->clk_rst); } static void apmu_clk_disable(struct clk *clk) { __raw_writel(0, clk->clk_rst); } struct clkops apmu_clk_ops = { .enable = apmu_clk_enable, .disable = apmu_clk_disable, }; static DEFINE_SPINLOCK(clocks_lock); int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); if (clk->enabled++ == 0) clk->ops->enable(clk); spin_unlock_irqrestore(&clocks_lock, flags); return 0; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { unsigned long flags; WARN_ON(clk->enabled == 0); spin_lock_irqsave(&clocks_lock, flags); if (--clk->enabled == 0) clk->ops->disable(clk); spin_unlock_irqrestore(&clocks_lock, flags); } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { unsigned long rate; if (clk->ops->getrate) rate = clk->ops->getrate(clk); else rate = clk->rate; return rate; } EXPORT_SYMBOL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; int ret = -EINVAL; if (clk->ops->setrate) { spin_lock_irqsave(&clocks_lock, flags); ret = clk->ops->setrate(clk, rate); spin_unlock_irqrestore(&clocks_lock, flags); } return ret; } 
EXPORT_SYMBOL(clk_set_rate);
gpl-2.0
lyn1337/LinuxDSc2
linux-2.6.x/net/bridge/netfilter/ebt_mark_m.c
47
1450
/* * ebt_mark_m * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * July, 2002 * */ #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_mark_m.h> #include <linux/module.h> static int ebt_filter_mark(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const void *data, unsigned int datalen) { struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data; if (info->bitmask & EBT_MARK_OR) return !(!!(skb->nfmark & info->mask) ^ info->invert); return !(((skb->nfmark & info->mask) == info->mark) ^ info->invert); } static int ebt_mark_check(const char *tablename, unsigned int hookmask, const struct ebt_entry *e, void *data, unsigned int datalen) { struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data; if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_m_info))) return -EINVAL; if (info->bitmask & ~EBT_MARK_MASK) return -EINVAL; if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND)) return -EINVAL; if (!info->bitmask) return -EINVAL; return 0; } static struct ebt_match filter_mark = { .name = EBT_MARK_MATCH, .match = ebt_filter_mark, .check = ebt_mark_check, .me = THIS_MODULE, }; static int __init init(void) { return ebt_register_match(&filter_mark); } static void __exit fini(void) { ebt_unregister_match(&filter_mark); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL");
gpl-2.0
vbatts/linux
block/blk-cgroup.c
47
36379
/* * Common Block IO controller cgroup interface * * Based on ideas and code from CFQ, CFS and BFQ: * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> * * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> * Paolo Valente <paolo.valente@unimore.it> * * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> * Nauman Rafique <nauman@google.com> * * For policy-specific per-blkcg data: * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it> * Arianna Avanzini <avanzini.arianna@gmail.com> */ #include <linux/ioprio.h> #include <linux/kdev_t.h> #include <linux/module.h> #include <linux/err.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/slab.h> #include <linux/genhd.h> #include <linux/delay.h> #include <linux/atomic.h> #include <linux/ctype.h> #include <linux/blk-cgroup.h> #include "blk.h" #define MAX_KEY_LEN 100 /* * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. * blkcg_pol_register_mutex nests outside of it and synchronizes entire * policy [un]register operations including cgroup file additions / * removals. Putting cgroup file registration outside blkcg_pol_mutex * allows grabbing it from cgroup callbacks. */ static DEFINE_MUTEX(blkcg_pol_register_mutex); static DEFINE_MUTEX(blkcg_pol_mutex); struct blkcg blkcg_root; EXPORT_SYMBOL_GPL(blkcg_root); struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css; static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */ static bool blkcg_policy_enabled(struct request_queue *q, const struct blkcg_policy *pol) { return pol && test_bit(pol->plid, q->blkcg_pols); } /** * blkg_free - free a blkg * @blkg: blkg to free * * Free @blkg which may be partially allocated. 
*/ static void blkg_free(struct blkcg_gq *blkg) { int i; if (!blkg) return; for (i = 0; i < BLKCG_MAX_POLS; i++) if (blkg->pd[i]) blkcg_policy[i]->pd_free_fn(blkg->pd[i]); if (blkg->blkcg != &blkcg_root) blk_exit_rl(&blkg->rl); blkg_rwstat_exit(&blkg->stat_ios); blkg_rwstat_exit(&blkg->stat_bytes); kfree(blkg); } /** * blkg_alloc - allocate a blkg * @blkcg: block cgroup the new blkg is associated with * @q: request_queue the new blkg is associated with * @gfp_mask: allocation mask to use * * Allocate a new blkg assocating @blkcg and @q. */ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, gfp_t gfp_mask) { struct blkcg_gq *blkg; int i; /* alloc and init base part */ blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); if (!blkg) return NULL; if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) || blkg_rwstat_init(&blkg->stat_ios, gfp_mask)) goto err_free; blkg->q = q; INIT_LIST_HEAD(&blkg->q_node); blkg->blkcg = blkcg; atomic_set(&blkg->refcnt, 1); /* root blkg uses @q->root_rl, init rl only for !root blkgs */ if (blkcg != &blkcg_root) { if (blk_init_rl(&blkg->rl, q, gfp_mask)) goto err_free; blkg->rl.blkg = blkg; } for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkg_policy_data *pd; if (!blkcg_policy_enabled(q, pol)) continue; /* alloc per-policy data and attach it to blkg */ pd = pol->pd_alloc_fn(gfp_mask, q->node); if (!pd) goto err_free; blkg->pd[i] = pd; pd->blkg = blkg; pd->plid = i; } return blkg; err_free: blkg_free(blkg); return NULL; } struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint) { struct blkcg_gq *blkg; /* * Hint didn't match. Look up from the radix tree. Note that the * hint can only be updated under queue_lock as otherwise @blkg * could have already been removed from blkg_tree. The caller is * responsible for grabbing queue_lock if @update_hint. 
*/ blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); if (blkg && blkg->q == q) { if (update_hint) { lockdep_assert_held(q->queue_lock); rcu_assign_pointer(blkcg->blkg_hint, blkg); } return blkg; } return NULL; } EXPORT_SYMBOL_GPL(blkg_lookup_slowpath); /* * If @new_blkg is %NULL, this function tries to allocate a new one as * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return. */ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct request_queue *q, struct blkcg_gq *new_blkg) { struct blkcg_gq *blkg; struct bdi_writeback_congested *wb_congested; int i, ret; WARN_ON_ONCE(!rcu_read_lock_held()); lockdep_assert_held(q->queue_lock); /* blkg holds a reference to blkcg */ if (!css_tryget_online(&blkcg->css)) { ret = -ENODEV; goto err_free_blkg; } wb_congested = wb_congested_get_create(&q->backing_dev_info, blkcg->css.id, GFP_NOWAIT); if (!wb_congested) { ret = -ENOMEM; goto err_put_css; } /* allocate */ if (!new_blkg) { new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT); if (unlikely(!new_blkg)) { ret = -ENOMEM; goto err_put_congested; } } blkg = new_blkg; blkg->wb_congested = wb_congested; /* link parent */ if (blkcg_parent(blkcg)) { blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); if (WARN_ON_ONCE(!blkg->parent)) { ret = -ENODEV; goto err_put_congested; } blkg_get(blkg->parent); } /* invoke per-policy init */ for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; if (blkg->pd[i] && pol->pd_init_fn) pol->pd_init_fn(blkg->pd[i]); } /* insert */ spin_lock(&blkcg->lock); ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); if (likely(!ret)) { hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); list_add(&blkg->q_node, &q->blkg_list); for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; if (blkg->pd[i] && pol->pd_online_fn) pol->pd_online_fn(blkg->pd[i]); } } blkg->online = true; spin_unlock(&blkcg->lock); if (!ret) return blkg; /* @blkg failed fully initialized, use the 
usual release path */ blkg_put(blkg); return ERR_PTR(ret); err_put_congested: wb_congested_put(wb_congested); err_put_css: css_put(&blkcg->css); err_free_blkg: blkg_free(new_blkg); return ERR_PTR(ret); } /** * blkg_lookup_create - lookup blkg, try to create one if not there * @blkcg: blkcg of interest * @q: request_queue of interest * * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to * create one. blkg creation is performed recursively from blkcg_root such * that all non-root blkg's have access to the parent blkg. This function * should be called under RCU read lock and @q->queue_lock. * * Returns pointer to the looked up or created blkg on success, ERR_PTR() * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not * dead and bypassing, returns ERR_PTR(-EBUSY). */ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q) { struct blkcg_gq *blkg; WARN_ON_ONCE(!rcu_read_lock_held()); lockdep_assert_held(q->queue_lock); /* * This could be the first entry point of blkcg implementation and * we shouldn't allow anything to go through for a bypassing queue. */ if (unlikely(blk_queue_bypass(q))) return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY); blkg = __blkg_lookup(blkcg, q, true); if (blkg) return blkg; /* * Create blkgs walking down from blkcg_root to @blkcg, so that all * non-root blkgs have access to their parents. 
*/ while (true) { struct blkcg *pos = blkcg; struct blkcg *parent = blkcg_parent(blkcg); while (parent && !__blkg_lookup(parent, q, false)) { pos = parent; parent = blkcg_parent(parent); } blkg = blkg_create(pos, q, NULL); if (pos == blkcg || IS_ERR(blkg)) return blkg; } } static void blkg_destroy(struct blkcg_gq *blkg) { struct blkcg *blkcg = blkg->blkcg; struct blkcg_gq *parent = blkg->parent; int i; lockdep_assert_held(blkg->q->queue_lock); lockdep_assert_held(&blkcg->lock); /* Something wrong if we are trying to remove same group twice */ WARN_ON_ONCE(list_empty(&blkg->q_node)); WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; if (blkg->pd[i] && pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[i]); } if (parent) { blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); } blkg->online = false; radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); list_del_init(&blkg->q_node); hlist_del_init_rcu(&blkg->blkcg_node); /* * Both setting lookup hint to and clearing it from @blkg are done * under queue_lock. If it's not pointing to @blkg now, it never * will. Hint assignment itself can race safely. */ if (rcu_access_pointer(blkcg->blkg_hint) == blkg) rcu_assign_pointer(blkcg->blkg_hint, NULL); /* * Put the reference taken at the time of creation so that when all * queues are gone, group can be destroyed. */ blkg_put(blkg); } /** * blkg_destroy_all - destroy all blkgs associated with a request_queue * @q: request_queue of interest * * Destroy all blkgs associated with @q. 
*/ static void blkg_destroy_all(struct request_queue *q) { struct blkcg_gq *blkg, *n; lockdep_assert_held(q->queue_lock); list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { struct blkcg *blkcg = blkg->blkcg; spin_lock(&blkcg->lock); blkg_destroy(blkg); spin_unlock(&blkcg->lock); } } /* * A group is RCU protected, but having an rcu lock does not mean that one * can access all the fields of blkg and assume these are valid. For * example, don't try to follow throtl_data and request queue links. * * Having a reference to blkg under an rcu allows accesses to only values * local to groups like group stats and group rate limits. */ void __blkg_release_rcu(struct rcu_head *rcu_head) { struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head); /* release the blkcg and parent blkg refs this blkg has been holding */ css_put(&blkg->blkcg->css); if (blkg->parent) blkg_put(blkg->parent); wb_congested_put(blkg->wb_congested); blkg_free(blkg); } EXPORT_SYMBOL_GPL(__blkg_release_rcu); /* * The next function used by blk_queue_for_each_rl(). It's a bit tricky * because the root blkg uses @q->root_rl instead of its own rl. */ struct request_list *__blk_queue_next_rl(struct request_list *rl, struct request_queue *q) { struct list_head *ent; struct blkcg_gq *blkg; /* * Determine the current blkg list_head. The first entry is * root_rl which is off @q->blkg_list and mapped to the head. 
*/ if (rl == &q->root_rl) { ent = &q->blkg_list; /* There are no more block groups, hence no request lists */ if (list_empty(ent)) return NULL; } else { blkg = container_of(rl, struct blkcg_gq, rl); ent = &blkg->q_node; } /* walk to the next list_head, skip root blkcg */ ent = ent->next; if (ent == &q->root_blkg->q_node) ent = ent->next; if (ent == &q->blkg_list) return NULL; blkg = container_of(ent, struct blkcg_gq, q_node); return &blkg->rl; } static int blkcg_reset_stats(struct cgroup_subsys_state *css, struct cftype *cftype, u64 val) { struct blkcg *blkcg = css_to_blkcg(css); struct blkcg_gq *blkg; int i; mutex_lock(&blkcg_pol_mutex); spin_lock_irq(&blkcg->lock); /* * Note that stat reset is racy - it doesn't synchronize against * stat updates. This is a debug feature which shouldn't exist * anyway. If you get hit by a race, retry. */ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { blkg_rwstat_reset(&blkg->stat_bytes); blkg_rwstat_reset(&blkg->stat_ios); for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; if (blkg->pd[i] && pol->pd_reset_stats_fn) pol->pd_reset_stats_fn(blkg->pd[i]); } } spin_unlock_irq(&blkcg->lock); mutex_unlock(&blkcg_pol_mutex); return 0; } const char *blkg_dev_name(struct blkcg_gq *blkg) { /* some drivers (floppy) instantiate a queue w/o disk registered */ if (blkg->q->backing_dev_info.dev) return dev_name(blkg->q->backing_dev_info.dev); return NULL; } EXPORT_SYMBOL_GPL(blkg_dev_name); /** * blkcg_print_blkgs - helper for printing per-blkg data * @sf: seq_file to print to * @blkcg: blkcg of interest * @prfill: fill function to print out a blkg * @pol: policy in question * @data: data to be passed to @prfill * @show_total: to print out sum of prfill return values or not * * This function invokes @prfill on each blkg of @blkcg if pd for the * policy specified by @pol exists. @prfill is invoked with @sf, the * policy data and @data and the matching queue lock held. 
If @show_total * is %true, the sum of the return values from @prfill is printed with * "Total" label at the end. * * This is to be used to construct print functions for * cftype->read_seq_string method. */ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int), const struct blkcg_policy *pol, int data, bool show_total) { struct blkcg_gq *blkg; u64 total = 0; rcu_read_lock(); hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { spin_lock_irq(blkg->q->queue_lock); if (blkcg_policy_enabled(blkg->q, pol)) total += prfill(sf, blkg->pd[pol->plid], data); spin_unlock_irq(blkg->q->queue_lock); } rcu_read_unlock(); if (show_total) seq_printf(sf, "Total %llu\n", (unsigned long long)total); } EXPORT_SYMBOL_GPL(blkcg_print_blkgs); /** * __blkg_prfill_u64 - prfill helper for a single u64 value * @sf: seq_file to print to * @pd: policy private data of interest * @v: value to print * * Print @v to @sf for the device assocaited with @pd. */ u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v) { const char *dname = blkg_dev_name(pd->blkg); if (!dname) return 0; seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v); return v; } EXPORT_SYMBOL_GPL(__blkg_prfill_u64); /** * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat * @sf: seq_file to print to * @pd: policy private data of interest * @rwstat: rwstat to print * * Print @rwstat to @sf for the device assocaited with @pd. 
*/ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, const struct blkg_rwstat *rwstat) { static const char *rwstr[] = { [BLKG_RWSTAT_READ] = "Read", [BLKG_RWSTAT_WRITE] = "Write", [BLKG_RWSTAT_SYNC] = "Sync", [BLKG_RWSTAT_ASYNC] = "Async", }; const char *dname = blkg_dev_name(pd->blkg); u64 v; int i; if (!dname) return 0; for (i = 0; i < BLKG_RWSTAT_NR; i++) seq_printf(sf, "%s %s %llu\n", dname, rwstr[i], (unsigned long long)atomic64_read(&rwstat->aux_cnt[i])); v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) + atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]); seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v); return v; } EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat); /** * blkg_prfill_stat - prfill callback for blkg_stat * @sf: seq_file to print to * @pd: policy private data of interest * @off: offset to the blkg_stat in @pd * * prfill callback for printing a blkg_stat. */ u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off) { return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off)); } EXPORT_SYMBOL_GPL(blkg_prfill_stat); /** * blkg_prfill_rwstat - prfill callback for blkg_rwstat * @sf: seq_file to print to * @pd: policy private data of interest * @off: offset to the blkg_rwstat in @pd * * prfill callback for printing a blkg_rwstat. */ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off); return __blkg_prfill_rwstat(sf, pd, &rwstat); } EXPORT_SYMBOL_GPL(blkg_prfill_rwstat); static u64 blkg_prfill_rwstat_field(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off); return __blkg_prfill_rwstat(sf, pd, &rwstat); } /** * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes * @sf: seq_file to print to * @v: unused * * To be used as cftype->seq_show to print blkg->stat_bytes. 
* cftype->private must be set to the blkcg_policy. */ int blkg_print_stat_bytes(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private, offsetof(struct blkcg_gq, stat_bytes), true); return 0; } EXPORT_SYMBOL_GPL(blkg_print_stat_bytes); /** * blkg_print_stat_bytes - seq_show callback for blkg->stat_ios * @sf: seq_file to print to * @v: unused * * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private * must be set to the blkcg_policy. */ int blkg_print_stat_ios(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private, offsetof(struct blkcg_gq, stat_ios), true); return 0; } EXPORT_SYMBOL_GPL(blkg_print_stat_ios); static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg, NULL, off); return __blkg_prfill_rwstat(sf, pd, &rwstat); } /** * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes * @sf: seq_file to print to * @v: unused */ int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat_field_recursive, (void *)seq_cft(sf)->private, offsetof(struct blkcg_gq, stat_bytes), true); return 0; } EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive); /** * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios * @sf: seq_file to print to * @v: unused */ int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat_field_recursive, (void *)seq_cft(sf)->private, offsetof(struct blkcg_gq, stat_ios), true); return 0; } EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive); /** * blkg_stat_recursive_sum - collect hierarchical blkg_stat * @blkg: blkg of interest * @pol: blkcg_policy which contains the blkg_stat * 
@off: offset to the blkg_stat in blkg_policy_data or @blkg * * Collect the blkg_stat specified by @blkg, @pol and @off and all its * online descendants and their aux counts. The caller must be holding the * queue lock for online tests. * * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is * at @off bytes into @blkg's blkg_policy_data of the policy. */ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, int off) { struct blkcg_gq *pos_blkg; struct cgroup_subsys_state *pos_css; u64 sum = 0; lockdep_assert_held(blkg->q->queue_lock); rcu_read_lock(); blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { struct blkg_stat *stat; if (!pos_blkg->online) continue; if (pol) stat = (void *)blkg_to_pd(pos_blkg, pol) + off; else stat = (void *)blkg + off; sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt); } rcu_read_unlock(); return sum; } EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum); /** * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat * @blkg: blkg of interest * @pol: blkcg_policy which contains the blkg_rwstat * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg * * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its * online descendants and their aux counts. The caller must be holding the * queue lock for online tests. * * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it * is at @off bytes into @blkg's blkg_policy_data of the policy. 
*/ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, int off) { struct blkcg_gq *pos_blkg; struct cgroup_subsys_state *pos_css; struct blkg_rwstat sum = { }; int i; lockdep_assert_held(blkg->q->queue_lock); rcu_read_lock(); blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { struct blkg_rwstat *rwstat; if (!pos_blkg->online) continue; if (pol) rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off; else rwstat = (void *)pos_blkg + off; for (i = 0; i < BLKG_RWSTAT_NR; i++) atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) + percpu_counter_sum_positive(&rwstat->cpu_cnt[i]), &sum.aux_cnt[i]); } rcu_read_unlock(); return sum; } EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum); /** * blkg_conf_prep - parse and prepare for per-blkg config update * @blkcg: target block cgroup * @pol: target policy * @input: input string * @ctx: blkg_conf_ctx to be filled * * Parse per-blkg config update from @input and initialize @ctx with the * result. @ctx->blkg points to the blkg to be updated and @ctx->body the * part of @input following MAJ:MIN. This function returns with RCU read * lock and queue lock held and must be paired with blkg_conf_finish(). 
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
           char *input, struct blkg_conf_ctx *ctx)
    __acquires(rcu) __acquires(disk->queue->queue_lock)
{
    struct gendisk *disk;
    struct blkcg_gq *blkg;
    unsigned int major, minor;
    int key_len, part, ret;
    char *body;

    /* @input must start with "MAJ:MIN" followed by whitespace */
    if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
        return -EINVAL;

    body = input + key_len;
    if (!isspace(*body))
        return -EINVAL;
    body = skip_spaces(body);

    /* config updates target whole devices, not partitions */
    disk = get_gendisk(MKDEV(major, minor), &part);
    if (!disk)
        return -ENODEV;
    if (part) {
        put_disk(disk);
        return -ENODEV;
    }

    rcu_read_lock();
    spin_lock_irq(disk->queue->queue_lock);

    if (blkcg_policy_enabled(disk->queue, pol))
        blkg = blkg_lookup_create(blkcg, disk->queue);
    else
        blkg = ERR_PTR(-EOPNOTSUPP);

    if (IS_ERR(blkg)) {
        ret = PTR_ERR(blkg);
        rcu_read_unlock();
        spin_unlock_irq(disk->queue->queue_lock);
        put_disk(disk);
        /*
         * If queue was bypassing, we should retry.  Do so after a
         * short msleep().  It isn't strictly necessary but queue
         * can be bypassing for some time and it's always nice to
         * avoid busy looping.
         */
        if (ret == -EBUSY) {
            msleep(10);
            ret = restart_syscall();
        }
        return ret;
    }

    /* success: caller releases the locks through blkg_conf_finish() */
    ctx->disk = disk;
    ctx->blkg = blkg;
    ctx->body = body;
    return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx intiailized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
    __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
    spin_unlock_irq(ctx->disk->queue->queue_lock);
    rcu_read_unlock();
    put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

/*
 * seq_show callback for the unified-hierarchy "stat" file: prints one
 * "<dev> rbytes=... wbytes=... rios=... wios=..." line per blkg with
 * hierarchically summed byte and io counts.
 */
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
    struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
    struct blkcg_gq *blkg;

    rcu_read_lock();

    hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
        const char *dname;
        struct blkg_rwstat rwstat;
        u64 rbytes, wbytes, rios, wios;

        dname = blkg_dev_name(blkg);
        if (!dname)
            continue;

        /* queue lock required by blkg_rwstat_recursive_sum() */
        spin_lock_irq(blkg->q->queue_lock);

        rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                    offsetof(struct blkcg_gq, stat_bytes));
        rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
        wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

        rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                    offsetof(struct blkcg_gq, stat_ios));
        rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
        wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

        spin_unlock_irq(blkg->q->queue_lock);

        /* skip devices with no activity at all */
        if (rbytes || wbytes || rios || wios)
            seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
                   dname, rbytes, wbytes, rios, wios);
    }

    rcu_read_unlock();
    return 0;
}

/* interface files for the unified (v2) hierarchy */
struct cftype blkcg_files[] = {
    {
        .name = "stat",
        .seq_show = blkcg_print_stat,
    },
    { }	/* terminate */
};

/* interface files for the legacy (v1) "blkio" hierarchy */
struct cftype blkcg_legacy_files[] = {
    {
        .name = "reset_stats",
        .write_u64 = blkcg_reset_stats,
    },
    { }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
*/ static void blkcg_css_offline(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); spin_lock_irq(&blkcg->lock); while (!hlist_empty(&blkcg->blkg_list)) { struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, struct blkcg_gq, blkcg_node); struct request_queue *q = blkg->q; if (spin_trylock(q->queue_lock)) { blkg_destroy(blkg); spin_unlock(q->queue_lock); } else { spin_unlock_irq(&blkcg->lock); cpu_relax(); spin_lock_irq(&blkcg->lock); } } spin_unlock_irq(&blkcg->lock); wb_blkcg_offline(blkcg); } static void blkcg_css_free(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); int i; mutex_lock(&blkcg_pol_mutex); list_del(&blkcg->all_blkcgs_node); for (i = 0; i < BLKCG_MAX_POLS; i++) if (blkcg->cpd[i]) blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); mutex_unlock(&blkcg_pol_mutex); kfree(blkcg); } static struct cgroup_subsys_state * blkcg_css_alloc(struct cgroup_subsys_state *parent_css) { struct blkcg *blkcg; struct cgroup_subsys_state *ret; int i; mutex_lock(&blkcg_pol_mutex); if (!parent_css) { blkcg = &blkcg_root; } else { blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); if (!blkcg) { ret = ERR_PTR(-ENOMEM); goto free_blkcg; } } for (i = 0; i < BLKCG_MAX_POLS ; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkcg_policy_data *cpd; /* * If the policy hasn't been attached yet, wait for it * to be attached before doing anything else. Otherwise, * check if the policy requires any specific per-cgroup * data: if it does, allocate and initialize it. 
*/ if (!pol || !pol->cpd_alloc_fn) continue; cpd = pol->cpd_alloc_fn(GFP_KERNEL); if (!cpd) { ret = ERR_PTR(-ENOMEM); goto free_pd_blkcg; } blkcg->cpd[i] = cpd; cpd->blkcg = blkcg; cpd->plid = i; if (pol->cpd_init_fn) pol->cpd_init_fn(cpd); } spin_lock_init(&blkcg->lock); INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT); INIT_HLIST_HEAD(&blkcg->blkg_list); #ifdef CONFIG_CGROUP_WRITEBACK INIT_LIST_HEAD(&blkcg->cgwb_list); #endif list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); mutex_unlock(&blkcg_pol_mutex); return &blkcg->css; free_pd_blkcg: for (i--; i >= 0; i--) if (blkcg->cpd[i]) blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); free_blkcg: kfree(blkcg); mutex_unlock(&blkcg_pol_mutex); return ret; } /** * blkcg_init_queue - initialize blkcg part of request queue * @q: request_queue to initialize * * Called from blk_alloc_queue_node(). Responsible for initializing blkcg * part of new request_queue @q. * * RETURNS: * 0 on success, -errno on failure. */ int blkcg_init_queue(struct request_queue *q) { struct blkcg_gq *new_blkg, *blkg; bool preloaded; int ret; new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); if (!new_blkg) return -ENOMEM; preloaded = !radix_tree_preload(GFP_KERNEL); /* * Make sure the root blkg exists and count the existing blkgs. As * @q is bypassing at this point, blkg_lookup_create() can't be * used. Open code insertion. */ rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); if (IS_ERR(blkg)) { blkg_free(new_blkg); return PTR_ERR(blkg); } q->root_blkg = blkg; q->root_rl.blkg = blkg; ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); blkg_destroy_all(q); spin_unlock_irq(q->queue_lock); } return ret; } /** * blkcg_drain_queue - drain blkcg part of request_queue * @q: request_queue to drain * * Called from blk_drain_queue(). Responsible for draining blkcg part. 
 */
void blkcg_drain_queue(struct request_queue *q)
{
    lockdep_assert_held(q->queue_lock);

    /*
     * @q could be exiting and already have destroyed all blkgs as
     * indicated by NULL root_blkg.  If so, don't confuse policies.
     */
    if (!q->root_blkg)
        return;

    blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
    spin_lock_irq(q->queue_lock);
    blkg_destroy_all(q);
    spin_unlock_irq(q->queue_lock);

    blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
                struct cgroup_taskset *tset)
{
    struct task_struct *task;
    struct io_context *ioc;
    int ret = 0;

    /* task_lock() is needed to avoid races with exit_io_context() */
    cgroup_taskset_for_each(task, tset) {
        task_lock(task);
        ioc = task->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
            ret = -EINVAL;
        task_unlock(task);
        if (ret)
            break;
    }
    return ret;
}

/* bind callback: let every policy react to the hierarchy being bound */
static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
    int i;

    mutex_lock(&blkcg_pol_mutex);
    for (i = 0; i < BLKCG_MAX_POLS; i++) {
        struct blkcg_policy *pol = blkcg_policy[i];
        struct blkcg *blkcg;

        if (!pol || !pol->cpd_bind_fn)
            continue;

        list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
            if (blkcg->cpd[pol->plid])
                pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
    }
    mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
    .css_alloc = blkcg_css_alloc,
    .css_offline = blkcg_css_offline,
    .css_free = blkcg_css_free,
    .can_attach = blkcg_can_attach,
    .bind = blkcg_bind,
    .dfl_cftypes = blkcg_files,
    .legacy_cftypes = blkcg_legacy_files,
    .legacy_name = "blkio",
#ifdef CONFIG_MEMCG
    /*
     * This ensures that, if available, memcg is automatically enabled
     * together on the default hierarchy so that the owner cgroup can
     * be retrieved from writeback pages.
     */
    .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
              const struct blkcg_policy *pol)
{
    struct blkg_policy_data *pd_prealloc = NULL;
    struct blkcg_gq *blkg;
    int ret;

    if (blkcg_policy_enabled(q, pol))
        return 0;

    blk_queue_bypass_start(q);
pd_prealloc:
    /* keep one GFP_KERNEL-allocated pd in reserve for the locked loop */
    if (!pd_prealloc) {
        pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
        if (!pd_prealloc) {
            ret = -ENOMEM;
            goto out_bypass_end;
        }
    }

    spin_lock_irq(q->queue_lock);

    list_for_each_entry(blkg, &q->blkg_list, q_node) {
        struct blkg_policy_data *pd;

        if (blkg->pd[pol->plid])
            continue;

        pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
        if (!pd)
            swap(pd, pd_prealloc);
        if (!pd) {
            /* both allocations failed: drop the lock and refill */
            spin_unlock_irq(q->queue_lock);
            goto pd_prealloc;
        }

        blkg->pd[pol->plid] = pd;
        pd->blkg = blkg;
        pd->plid = pol->plid;
        if (pol->pd_init_fn)
            pol->pd_init_fn(pd);
    }

    __set_bit(pol->plid, q->blkcg_pols);
    ret = 0;

    spin_unlock_irq(q->queue_lock);
out_bypass_end:
    blk_queue_bypass_end(q);
    if (pd_prealloc)
        pol->pd_free_fn(pd_prealloc);
    return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 *
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                 const struct blkcg_policy *pol)
{
    struct blkcg_gq *blkg;

    if (!blkcg_policy_enabled(q, pol))
        return;

    blk_queue_bypass_start(q);
    spin_lock_irq(q->queue_lock);

    __clear_bit(pol->plid, q->blkcg_pols);

    list_for_each_entry(blkg, &q->blkg_list, q_node) {
        /* grab blkcg lock too while removing @pd from @blkg */
        spin_lock(&blkg->blkcg->lock);

        if (blkg->pd[pol->plid]) {
            if (pol->pd_offline_fn)
                pol->pd_offline_fn(blkg->pd[pol->plid]);
            pol->pd_free_fn(blkg->pd[pol->plid]);
            blkg->pd[pol->plid] = NULL;
        }

        spin_unlock(&blkg->blkcg->lock);
    }

    spin_unlock_irq(q->queue_lock);
    blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
    struct blkcg *blkcg;
    int i, ret;

    mutex_lock(&blkcg_pol_register_mutex);
    mutex_lock(&blkcg_pol_mutex);

    /* find an empty slot */
    ret = -ENOSPC;
    for (i = 0; i < BLKCG_MAX_POLS; i++)
        if (!blkcg_policy[i])
            break;
    if (i >= BLKCG_MAX_POLS)
        goto err_unlock;

    /* register @pol */
    pol->plid = i;
    blkcg_policy[pol->plid] = pol;

    /* allocate and install cpd's */
    if (pol->cpd_alloc_fn) {
        list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
            struct blkcg_policy_data *cpd;

            cpd = pol->cpd_alloc_fn(GFP_KERNEL);
            if (!cpd) {
                mutex_unlock(&blkcg_pol_mutex);
                goto err_free_cpds;
            }

            blkcg->cpd[pol->plid] = cpd;
            cpd->blkcg = blkcg;
            cpd->plid = pol->plid;
            /*
             * NOTE(review): called unconditionally here but guarded
             * with "if (pol->cpd_init_fn)" in blkcg_css_alloc() --
             * confirm every policy that sets cpd_alloc_fn also sets
             * cpd_init_fn, otherwise this is a NULL deref.
             */
            pol->cpd_init_fn(cpd);
        }
    }

    mutex_unlock(&blkcg_pol_mutex);

    /* everything is in place, add intf files for the new policy */
    if (pol->dfl_cftypes)
        WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
                           pol->dfl_cftypes));
    if (pol->legacy_cftypes)
        WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
                          pol->legacy_cftypes));
    mutex_unlock(&blkcg_pol_register_mutex);
    return 0;

err_free_cpds:
    /* undo any cpd's installed before the allocation failure */
    if (pol->cpd_alloc_fn) {
        list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
            if (blkcg->cpd[pol->plid]) {
                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                blkcg->cpd[pol->plid] = NULL;
            }
        }
    }
    blkcg_policy[pol->plid] = NULL;
err_unlock:
    mutex_unlock(&blkcg_pol_mutex);
    mutex_unlock(&blkcg_pol_register_mutex);
    return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
    struct blkcg *blkcg;

    mutex_lock(&blkcg_pol_register_mutex);

    if (WARN_ON(blkcg_policy[pol->plid] != pol))
        goto out_unlock;

    /* kill the intf files first */
    if (pol->dfl_cftypes)
        cgroup_rm_cftypes(pol->dfl_cftypes);
    if (pol->legacy_cftypes)
        cgroup_rm_cftypes(pol->legacy_cftypes);

    /* remove cpds and unregister */
    mutex_lock(&blkcg_pol_mutex);

    if (pol->cpd_alloc_fn) {
        list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
            if (blkcg->cpd[pol->plid]) {
                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                blkcg->cpd[pol->plid] = NULL;
            }
        }
    }
    blkcg_policy[pol->plid] = NULL;

    mutex_unlock(&blkcg_pol_mutex);
out_unlock:
    mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
gpl-2.0
antmicro/enclustra_zynq_linux
drivers/net/can/dev.c
47
20288
/* * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix * Copyright (C) 2006 Andrey Volkov, Varma Electronics * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/netlink.h> #include <net/rtnetlink.h> #define MOD_DESC "CAN device driver interface" MODULE_DESCRIPTION(MOD_DESC); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); #ifdef CONFIG_CAN_CALC_BITTIMING #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ /* * Bit-timing calculation derived from: * * Code based on LinCAN sources and H8S2638 project * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz * Copyright 2005 Stanislav Marek * email: pisa@cmp.felk.cvut.cz * * Calculates proper bit-timing parameters for a specified bit-rate * and sample-point, which can then be used to set the bit-timing * registers of the CAN controller. You can find more information * in the header file linux/can/netlink.h. 
 */
/* clamp tseg1/tseg2 around the requested sample point; returns the
 * achieved sample point in one-tenth of a percent */
static int can_update_spt(const struct can_bittiming_const *btc,
              int sampl_pt, int tseg, int *tseg1, int *tseg2)
{
    *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000;
    if (*tseg2 < btc->tseg2_min)
        *tseg2 = btc->tseg2_min;
    if (*tseg2 > btc->tseg2_max)
        *tseg2 = btc->tseg2_max;
    *tseg1 = tseg - *tseg2;
    if (*tseg1 > btc->tseg1_max) {
        *tseg1 = btc->tseg1_max;
        *tseg2 = tseg - *tseg1;
    }
    return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
}

/*
 * Search brp/tseg combinations minimizing bitrate error first and sample
 * point error second; fills in @bt with the winning parameters.
 */
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
    struct can_priv *priv = netdev_priv(dev);
    const struct can_bittiming_const *btc = priv->bittiming_const;
    long rate, best_rate = 0;
    long best_error = 1000000000, error = 0;
    int best_tseg = 0, best_brp = 0, brp = 0;
    int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
    int spt_error = 1000, spt = 0, sampl_pt;
    u64 v64;

    if (!priv->bittiming_const)
        return -ENOTSUPP;

    /* Use CIA recommended sample points */
    if (bt->sample_point) {
        sampl_pt = bt->sample_point;
    } else {
        if (bt->bitrate > 800000)
            sampl_pt = 750;
        else if (bt->bitrate > 500000)
            sampl_pt = 800;
        else
            sampl_pt = 875;
    }

    /* tseg even = round down, odd = round up */
    for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
         tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
        tsegall = 1 + tseg / 2;
        /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
        brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
        /* chose brp step which is possible in system */
        brp = (brp / btc->brp_inc) * btc->brp_inc;
        if ((brp < btc->brp_min) || (brp > btc->brp_max))
            continue;
        rate = priv->clock.freq / (brp * tsegall);
        error = bt->bitrate - rate;
        /* tseg brp biterror */
        if (error < 0)
            error = -error;
        if (error > best_error)
            continue;
        best_error = error;
        /* exact bitrate: pick the candidate closest to the sample point */
        if (error == 0) {
            spt = can_update_spt(btc, sampl_pt, tseg / 2,
                         &tseg1, &tseg2);
            error = sampl_pt - spt;
            if (error < 0)
                error = -error;
            if (error > spt_error)
                continue;
            spt_error = error;
        }
        best_tseg = tseg / 2;
        best_brp = brp;
        best_rate = rate;
        if (error == 0)
            break;
    }

    if (best_error) {
        /* Error in one-tenth of a percent */
        error = (best_error * 1000) / bt->bitrate;
        if (error > CAN_CALC_MAX_ERROR) {
            netdev_err(dev,
                   "bitrate error %ld.%ld%% too high\n",
                   error / 10, error % 10);
            return -EDOM;
        } else {
            netdev_warn(dev, "bitrate error %ld.%ld%%\n",
                    error / 10, error % 10);
        }
    }

    /* real sample point */
    bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg,
                      &tseg1, &tseg2);

    /* time quantum in ns, rounded via 64-bit arithmetic */
    v64 = (u64)best_brp * 1000000000UL;
    do_div(v64, priv->clock.freq);
    bt->tq = (u32)v64;
    bt->prop_seg = tseg1 / 2;
    bt->phase_seg1 = tseg1 - bt->prop_seg;
    bt->phase_seg2 = tseg2;

    /* check for sjw user settings */
    if (!bt->sjw || !btc->sjw_max)
        bt->sjw = 1;
    else {
        /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
        if (bt->sjw > btc->sjw_max)
            bt->sjw = btc->sjw_max;
        /* bt->sjw must not be higher than tseg2 */
        if (tseg2 < bt->sjw)
            bt->sjw = tseg2;
    }

    bt->brp = best_brp;
    /* real bit-rate */
    bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1));

    return 0;
}
#else /* !CONFIG_CAN_CALC_BITTIMING */
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
    netdev_err(dev, "bit-timing calculation not available\n");
    return -EINVAL;
}
#endif /* CONFIG_CAN_CALC_BITTIMING */

/*
 * Checks the validity of the specified bit-timing parameters prop_seg,
 * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
 * prescaler value brp.  You can find more information in the header
 * file linux/can/netlink.h.
 */
static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
    struct can_priv *priv = netdev_priv(dev);
    const struct can_bittiming_const *btc = priv->bittiming_const;
    int tseg1, alltseg;
    u64 brp64;

    if (!priv->bittiming_const)
        return -ENOTSUPP;

    tseg1 = bt->prop_seg + bt->phase_seg1;
    if (!bt->sjw)
        bt->sjw = 1;
    if (bt->sjw > btc->sjw_max ||
        tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
        bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
        return -ERANGE;

    /* derive brp from tq (ns) and the controller clock, rounding */
    brp64 = (u64)priv->clock.freq * (u64)bt->tq;
    if (btc->brp_inc > 1)
        do_div(brp64, btc->brp_inc);
    brp64 += 500000000UL - 1;
    do_div(brp64, 1000000000UL); /* the practicable BRP */
    if (btc->brp_inc > 1)
        brp64 *= btc->brp_inc;
    bt->brp = (u32)brp64;

    if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
        return -EINVAL;

    alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
    bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
    bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;

    return 0;
}

/* dispatch: calculate bit-timing from a bitrate or validate explicit params */
static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
    struct can_priv *priv = netdev_priv(dev);
    int err;

    /* Check if the CAN device has bit-timing parameters */
    if (priv->bittiming_const) {

        /* Non-expert mode? Check if the bitrate has been pre-defined */
        if (!bt->tq)
            /* Determine bit-timing parameters */
            err = can_calc_bittiming(dev, bt);
        else
            /* Check bit-timing params and calculate proper brp */
            err = can_fixup_bittiming(dev, bt);
        if (err)
            return err;
    }

    return 0;
}

/*
 * Local echo of CAN messages
 *
 * CAN network devices *should* support a local echo functionality
 * (see Documentation/networking/can.txt). To test the handling of CAN
 * interfaces that do not support the local echo both driver types are
 * implemented. In the case that the driver does not support the echo
 * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
 * to perform the echo as a fallback solution.
 */
static void can_flush_echo_skb(struct net_device *dev)
{
    struct can_priv *priv = netdev_priv(dev);
    struct net_device_stats *stats = &dev->stats;
    int i;

    /* drop every pending echo skb and account them as aborted TX */
    for (i = 0; i < priv->echo_skb_max; i++) {
        if (priv->echo_skb[i]) {
            kfree_skb(priv->echo_skb[i]);
            priv->echo_skb[i] = NULL;
            stats->tx_dropped++;
            stats->tx_aborted_errors++;
        }
    }
}

/*
 * Put the skb on the stack to be looped backed locally lateron
 *
 * The function is typically called in the start_xmit function
 * of the device driver. The driver must protect access to
 * priv->echo_skb, if necessary.
 */
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
              unsigned int idx)
{
    struct can_priv *priv = netdev_priv(dev);

    BUG_ON(idx >= priv->echo_skb_max);

    /* check flag whether this packet has to be looped back */
    if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
        kfree_skb(skb);
        return;
    }

    if (!priv->echo_skb[idx]) {
        struct sock *srcsk = skb->sk;

        /* clone shared skbs so we own the copy we keep */
        if (atomic_read(&skb->users) != 1) {
            struct sk_buff *old_skb = skb;

            skb = skb_clone(old_skb, GFP_ATOMIC);
            kfree_skb(old_skb);
            if (!skb)
                return;
        } else
            skb_orphan(skb);

        skb->sk = srcsk;

        /* make settings for echo to reduce code in irq context */
        skb->protocol = htons(ETH_P_CAN);
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->dev = dev;

        /* save this skb for tx interrupt echo handling */
        priv->echo_skb[idx] = skb;
    } else {
        /* locking problem with netif_stop_queue() ?? */
        netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
        kfree_skb(skb);
    }
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);

/*
 * Get the skb from the stack and loop it back locally
 *
 * The function is typically called when the TX done interrupt
 * is handled in the device driver. The driver must protect
 * access to priv->echo_skb, if necessary.
 */
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
{
    struct can_priv *priv = netdev_priv(dev);

    BUG_ON(idx >= priv->echo_skb_max);

    if (priv->echo_skb[idx]) {
        struct sk_buff *skb = priv->echo_skb[idx];
        struct can_frame *cf = (struct can_frame *)skb->data;
        /* dlc is read before the skb is handed to netif_rx() */
        u8 dlc = cf->can_dlc;

        netif_rx(priv->echo_skb[idx]);
        priv->echo_skb[idx] = NULL;

        return dlc;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(can_get_echo_skb);

/*
 * Remove the skb from the stack and free it.
 *
 * The function is typically called when TX failed.
 */
void can_free_echo_skb(struct net_device *dev, unsigned int idx)
{
    struct can_priv *priv = netdev_priv(dev);

    BUG_ON(idx >= priv->echo_skb_max);

    if (priv->echo_skb[idx]) {
        kfree_skb(priv->echo_skb[idx]);
        priv->echo_skb[idx] = NULL;
    }
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);

/*
 * CAN device restart for bus-off recovery
 */
void can_restart(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct can_priv *priv = netdev_priv(dev);
    struct net_device_stats *stats = &dev->stats;
    struct sk_buff *skb;
    struct can_frame *cf;
    int err;

    BUG_ON(netif_carrier_ok(dev));

    /*
     * No synchronization needed because the device is bus-off and
     * no messages can come in or go out.
     */
    can_flush_echo_skb(dev);

    /* send restart message upstream */
    skb = alloc_can_err_skb(dev, &cf);
    if (skb == NULL) {
        err = -ENOMEM;
        goto restart;
    }
    cf->can_id |= CAN_ERR_RESTARTED;

    netif_rx(skb);

    /*
     * NOTE(review): @cf points into @skb, which has already been handed
     * to netif_rx() above -- reading cf->can_dlc here looks like a
     * use-after-hand-off; later kernels update stats before netif_rx().
     */
    stats->rx_packets++;
    stats->rx_bytes += cf->can_dlc;

restart:
    netdev_dbg(dev, "restarted\n");
    priv->can_stats.restarts++;

    /* Now restart the device */
    err = priv->do_set_mode(dev, CAN_MODE_START);

    netif_carrier_on(dev);
    if (err)
        netdev_err(dev, "Error %d during restart", err);
}

int can_restart_now(struct net_device *dev)
{
    struct can_priv *priv = netdev_priv(dev);

    /*
     * A manual restart is only permitted if automatic restart is
     * disabled and the device is in the bus-off state
     */
    if (priv->restart_ms)
        return -EINVAL;
    if (priv->state != CAN_STATE_BUS_OFF)
        return -EBUSY;

    /* Runs as soon as possible in the timer context */
    mod_timer(&priv->restart_timer, jiffies);

    return 0;
}

/*
 * CAN bus-off
 *
 * This functions should be called when the device goes bus-off to
 * tell the netif layer that no more packets can be sent or received.
 * If enabled, a timer is started to trigger bus-off recovery.
 */
void can_bus_off(struct net_device *dev)
{
    struct can_priv *priv = netdev_priv(dev);

    netdev_dbg(dev, "bus-off\n");

    netif_carrier_off(dev);
    priv->can_stats.bus_off++;

    /* schedule automatic recovery if restart_ms is configured */
    if (priv->restart_ms)
        mod_timer(&priv->restart_timer,
              jiffies + (priv->restart_ms * HZ) / 1000);
}
EXPORT_SYMBOL_GPL(can_bus_off);

/* netdev setup callback: initialize generic CAN interface defaults */
static void can_setup(struct net_device *dev)
{
    dev->type = ARPHRD_CAN;
    dev->mtu = sizeof(struct can_frame);
    dev->hard_header_len = 0;
    dev->addr_len = 0;
    dev->tx_queue_len = 10;

    /* New-style flags. */
    dev->flags = IFF_NOARP;
    dev->features = NETIF_F_HW_CSUM;
}

struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
    struct sk_buff *skb;

    skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
    if (unlikely(!skb))
        return NULL;

    skb->protocol = htons(ETH_P_CAN);
    skb->pkt_type = PACKET_BROADCAST;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    /* hand the zeroed in-skb frame back to the caller via @cf */
    *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
    memset(*cf, 0, sizeof(struct can_frame));

    return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_skb);

struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
    struct sk_buff *skb;

    skb = alloc_can_skb(dev, cf);
    if (unlikely(!skb))
        return NULL;

    (*cf)->can_id = CAN_ERR_FLAG;
    (*cf)->can_dlc = CAN_ERR_DLC;

    return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);

/*
 * Allocate and setup space for the CAN network device
 */
struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
{
    struct net_device *dev;
    struct can_priv *priv;
    int size;

    /* reserve the echo_skb pointer array right behind the private data */
    if (echo_skb_max)
        size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
            echo_skb_max * sizeof(struct sk_buff *);
    else
        size = sizeof_priv;

    dev = alloc_netdev(size, "can%d", can_setup);
    if (!dev)
        return NULL;

    priv = netdev_priv(dev);

    if (echo_skb_max) {
        priv->echo_skb_max = echo_skb_max;
        priv->echo_skb = (void *)priv +
            ALIGN(sizeof_priv, sizeof(struct sk_buff *));
    }

    priv->state = CAN_STATE_STOPPED;

    init_timer(&priv->restart_timer);

    return dev;
}
EXPORT_SYMBOL_GPL(alloc_candev);

/*
 * Free space of the CAN network device
 */
void free_candev(struct net_device *dev)
{
    free_netdev(dev);
}
EXPORT_SYMBOL_GPL(free_candev);

/*
 * Common open function when the device gets opened.
 *
 * This function should be called in the open function of the device
 * driver.
 */
int open_candev(struct net_device *dev)
{
    struct can_priv *priv = netdev_priv(dev);

    /* refuse to start without configured bit-timing */
    if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
        netdev_err(dev, "bit-timing not yet defined\n");
        return -EINVAL;
    }

    /* Switch carrier on if device was stopped while in bus-off state */
    if (!netif_carrier_ok(dev))
        netif_carrier_on(dev);

    setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);

    return 0;
}
EXPORT_SYMBOL_GPL(open_candev);

/*
 * Common close function for cleanup before the device gets closed.
 *
 * This function should be called in the close function of the device
 * driver.
 */
void close_candev(struct net_device *dev)
{
    struct can_priv *priv = netdev_priv(dev);

    /* cancel a pending automatic restart and drop pending echo skbs */
    if (del_timer_sync(&priv->restart_timer))
        dev_put(dev);
    can_flush_echo_skb(dev);
}
EXPORT_SYMBOL_GPL(close_candev);

/*
 * CAN netlink interface
 */
static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
    [IFLA_CAN_STATE]	= { .type = NLA_U32 },
    [IFLA_CAN_CTRLMODE]	= { .len = sizeof(struct can_ctrlmode) },
    [IFLA_CAN_RESTART_MS]	= { .type = NLA_U32 },
    [IFLA_CAN_RESTART]	= { .type = NLA_U32 },
    [IFLA_CAN_BITTIMING]	= { .len = sizeof(struct can_bittiming) },
    [IFLA_CAN_BITTIMING_CONST]
                = { .len = sizeof(struct can_bittiming_const) },
    [IFLA_CAN_CLOCK]	= { .len = sizeof(struct can_clock) },
    [IFLA_CAN_BERR_COUNTER]	= { .len = sizeof(struct can_berr_counter) },
};

/* rtnl changelink callback: apply IFLA_CAN_* attribute updates */
static int can_changelink(struct net_device *dev,
              struct nlattr *tb[], struct nlattr *data[])
{
    struct can_priv *priv = netdev_priv(dev);
    int err;

    /* We need synchronization with dev->stop() */
    ASSERT_RTNL();

    if (data[IFLA_CAN_CTRLMODE]) {
        struct can_ctrlmode *cm;

        /* Do not allow changing controller mode while running */
        if (dev->flags & IFF_UP)
            return -EBUSY;
        cm = nla_data(data[IFLA_CAN_CTRLMODE]);
        if (cm->flags & ~priv->ctrlmode_supported)
            return -EOPNOTSUPP;
        priv->ctrlmode &= ~cm->mask;
        priv->ctrlmode |= cm->flags;
    }

    if (data[IFLA_CAN_BITTIMING]) {
        struct can_bittiming bt;

        /* Do not allow changing bittiming while running */
        if (dev->flags
& IFF_UP) return -EBUSY; memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq)) return -EINVAL; err = can_get_bittiming(dev, &bt); if (err) return err; memcpy(&priv->bittiming, &bt, sizeof(bt)); if (priv->do_set_bittiming) { /* Finally, set the bit-timing registers */ err = priv->do_set_bittiming(dev); if (err) return err; } } if (data[IFLA_CAN_RESTART_MS]) { /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) return -EBUSY; priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); } if (data[IFLA_CAN_RESTART]) { /* Do not allow a restart while not running */ if (!(dev->flags & IFF_UP)) return -EINVAL; err = can_restart_now(dev); if (err) return err; } return 0; } static size_t can_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); size_t size; size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ size += sizeof(struct can_berr_counter); if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ size += sizeof(struct can_bittiming_const); return size; } static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct can_ctrlmode cm = {.flags = priv->ctrlmode}; struct can_berr_counter bec; enum can_state state = priv->state; if (priv->do_get_state) priv->do_get_state(dev, &state); if (nla_put_u32(skb, IFLA_CAN_STATE, state) || nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || nla_put(skb, IFLA_CAN_BITTIMING, sizeof(priv->bittiming), &priv->bittiming) || nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) || 
(priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec) && nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || (priv->bittiming_const && nla_put(skb, IFLA_CAN_BITTIMING_CONST, sizeof(*priv->bittiming_const), priv->bittiming_const))) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static size_t can_get_xstats_size(const struct net_device *dev) { return sizeof(struct can_device_stats); } static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); if (nla_put(skb, IFLA_INFO_XSTATS, sizeof(priv->can_stats), &priv->can_stats)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int can_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { return -EOPNOTSUPP; } static struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, .newlink = can_newlink, .changelink = can_changelink, .get_size = can_get_size, .fill_info = can_fill_info, .get_xstats_size = can_get_xstats_size, .fill_xstats = can_fill_xstats, }; /* * Register the CAN network device */ int register_candev(struct net_device *dev) { dev->rtnl_link_ops = &can_link_ops; return register_netdev(dev); } EXPORT_SYMBOL_GPL(register_candev); /* * Unregister the CAN network device */ void unregister_candev(struct net_device *dev) { unregister_netdev(dev); } EXPORT_SYMBOL_GPL(unregister_candev); static __init int can_dev_init(void) { int err; err = rtnl_link_register(&can_link_ops); if (!err) printk(KERN_INFO MOD_DESC "\n"); return err; } module_init(can_dev_init); static __exit void can_dev_exit(void) { rtnl_link_unregister(&can_link_ops); } module_exit(can_dev_exit); MODULE_ALIAS_RTNL_LINK("can");
gpl-2.0
jazzsir/iamroot-linux-arm10c
fs/proc/fd.c
47
7570
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/dcache.h>
#include <linux/path.h>
#include <linux/fdtable.h>
#include <linux/namei.h>
#include <linux/pid.h>
#include <linux/security.h>
#include <linux/file.h>
#include <linux/seq_file.h>

#include <linux/proc_fs.h>

#include "internal.h"
#include "fd.h"

/*
 * Show one /proc/<pid>/fdinfo/<fd> entry: file position and open flags,
 * optionally followed by driver-specific lines from ->show_fdinfo().
 *
 * Takes a reference on the struct file under files->file_lock so the
 * file cannot go away between the lookup and the printing below.
 */
static int seq_show(struct seq_file *m, void *v)
{
	struct files_struct *files = NULL;
	int f_flags = 0, ret = -ENOENT;
	struct file *file = NULL;
	struct task_struct *task;

	task = get_proc_task(m->private);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);

	if (files) {
		int fd = proc_fd(m->private);

		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			struct fdtable *fdt = files_fdtable(files);

			f_flags = file->f_flags;
			/* O_CLOEXEC lives in the fdtable bitmap, not f_flags */
			if (close_on_exec(fd, fdt))
				f_flags |= O_CLOEXEC;

			get_file(file);
			ret = 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}

	if (!ret) {
		seq_printf(m, "pos:\t%lli\nflags:\t0%o\n",
			   (long long)file->f_pos, f_flags);

		if (file->f_op->show_fdinfo)
			ret = file->f_op->show_fdinfo(m, file);

		/* drop the reference taken under file_lock above */
		fput(file);
	}

	return ret;
}

static int seq_fdinfo_open(struct inode *inode, struct file *file)
{
	return single_open(file, seq_show, inode);
}

static const struct file_operations proc_fdinfo_file_operations = {
	.open		= seq_fdinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Revalidate a cached /proc/<pid>/fd/<fd> dentry.
 *
 * Returns 1 (valid) only if the task still exists and the fd is still
 * open; refreshes the inode's owner and the symlink's rwx mode bits to
 * track the current file mode. Otherwise drops the dentry and returns 0.
 * Cannot run in RCU-walk mode (takes task/files references).
 */
static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct files_struct *files;
	struct task_struct *task;
	const struct cred *cred;
	struct inode *inode;
	int fd;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = dentry->d_inode;
	task = get_proc_task(inode);
	fd = proc_fd(inode);

	if (task) {
		files = get_files_struct(task);
		if (files) {
			struct file *file;

			rcu_read_lock();
			file = fcheck_files(files, fd);
			if (file) {
				unsigned f_mode = file->f_mode;

				/* f_mode snapshotted; safe to leave RCU */
				rcu_read_unlock();
				put_files_struct(files);

				/* hide ownership of undumpable tasks */
				if (task_dumpable(task)) {
					rcu_read_lock();
					cred = __task_cred(task);
					inode->i_uid = cred->euid;
					inode->i_gid = cred->egid;
					rcu_read_unlock();
				} else {
					inode->i_uid = GLOBAL_ROOT_UID;
					inode->i_gid = GLOBAL_ROOT_GID;
				}

				/* mirror the file's read/write mode on the link */
				if (S_ISLNK(inode->i_mode)) {
					unsigned i_mode = S_IFLNK;
					if (f_mode & FMODE_READ)
						i_mode |= S_IRUSR | S_IXUSR;
					if (f_mode & FMODE_WRITE)
						i_mode |= S_IWUSR | S_IXUSR;
					inode->i_mode = i_mode;
				}

				security_task_to_inode(task, inode);
				put_task_struct(task);
				return 1;
			}
			rcu_read_unlock();
			put_files_struct(files);
		}
		put_task_struct(task);
	}

	d_drop(dentry);
	return 0;
}

static const struct dentry_operations tid_fd_dentry_operations = {
	.d_revalidate	= tid_fd_revalidate,
	.d_delete	= pid_delete_dentry,
};

/*
 * Resolve a /proc/<pid>/fd/<fd> symlink to the open file's path,
 * taking a path reference for the caller.
 */
static int proc_fd_link(struct dentry *dentry, struct path *path)
{
	struct files_struct *files = NULL;
	struct task_struct *task;
	int ret = -ENOENT;

	task = get_proc_task(dentry->d_inode);
	if (task) {
		files = get_files_struct(task);
		put_task_struct(task);
	}

	if (files) {
		int fd = proc_fd(dentry->d_inode);
		struct file *fd_file;

		spin_lock(&files->file_lock);
		fd_file = fcheck_files(files, fd);
		if (fd_file) {
			*path = fd_file->f_path;
			path_get(&fd_file->f_path);
			ret = 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}

	return ret;
}

/*
 * Build the inode + dentry for one /proc/<pid>/fd/<fd> symlink.
 * ptr carries the fd number cast through unsigned long.
 */
static int
proc_fd_instantiate(struct inode *dir, struct dentry *dentry,
		    struct task_struct *task, const void *ptr)
{
	unsigned fd = (unsigned long)ptr;
	struct proc_inode *ei;
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	ei->fd = fd;

	inode->i_mode = S_IFLNK;
	inode->i_op = &proc_pid_link_inode_operations;
	inode->i_size = 64;

	ei->op.proc_get_link = proc_fd_link;

	d_set_d_op(dentry, &tid_fd_dentry_operations);
	d_add(dentry, inode);

	/* Close the race of the process dying before we return the dentry */
	if (tid_fd_revalidate(dentry, 0))
		return 0;
 out:
	return -ENOENT;
}

/* Shared lookup for fd/ and fdinfo/: parse the fd from the name,
 * then hand off to the per-directory instantiate callback. */
static struct dentry *proc_lookupfd_common(struct inode *dir,
					   struct dentry *dentry,
					   instantiate_t instantiate)
{
	struct task_struct *task = get_proc_task(dir);
	int result = -ENOENT;
	unsigned fd = name_to_int(dentry);

	if (!task)
		goto out_no_task;
	if (fd == ~0U)
		goto out;

	result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
out:
	put_task_struct(task);
out_no_task:
	return ERR_PTR(result);
}

/*
 * Shared readdir for fd/ and fdinfo/: emit "." and "..", then one entry
 * per open fd. RCU protects the fdtable walk; it is dropped around
 * proc_fill_cache() (which may block) and re-taken afterwards.
 */
static int proc_readfd_common(struct file *file, struct dir_context *ctx,
			      instantiate_t instantiate)
{
	struct task_struct *p = get_proc_task(file_inode(file));
	struct files_struct *files;
	unsigned int fd;

	if (!p)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;
	files = get_files_struct(p);
	if (!files)
		goto out;

	rcu_read_lock();
	/* ctx->pos - 2 accounts for the "." and ".." entries */
	for (fd = ctx->pos - 2;
	     fd < files_fdtable(files)->max_fds;
	     fd++, ctx->pos++) {
		char name[PROC_NUMBUF];
		int len;

		if (!fcheck_files(files, fd))
			continue;
		rcu_read_unlock();

		len = snprintf(name, sizeof(name), "%d", fd);
		if (!proc_fill_cache(file, ctx,
				     name, len, instantiate, p,
				     (void *)(unsigned long)fd))
			goto out_fd_loop;
		rcu_read_lock();
	}
	rcu_read_unlock();
out_fd_loop:
	put_files_struct(files);
out:
	put_task_struct(p);
	return 0;
}

static int proc_readfd(struct file *file, struct dir_context *ctx)
{
	return proc_readfd_common(file, ctx, proc_fd_instantiate);
}

const struct file_operations proc_fd_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_readfd,
	.llseek		= default_llseek,
};

static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
				    unsigned int flags)
{
	return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
}

/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
int proc_fd_permission(struct inode *inode, int mask)
{
	int rv = generic_permission(inode, mask);
	if (rv == 0)
		return 0;
	/* a task may always look at its own fd directory */
	if (task_pid(current) == proc_pid(inode))
		rv = 0;
	return rv;
}

const struct inode_operations proc_fd_inode_operations = {
	.lookup		= proc_lookupfd,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};

/*
 * Build the inode + dentry for one /proc/<pid>/fdinfo/<fd> regular file.
 */
static int
proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry,
			struct task_struct *task, const void *ptr)
{
	unsigned fd = (unsigned long)ptr;
	struct proc_inode *ei;
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	ei->fd = fd;

	inode->i_mode = S_IFREG | S_IRUSR;
	inode->i_fop = &proc_fdinfo_file_operations;

	d_set_d_op(dentry, &tid_fd_dentry_operations);
	d_add(dentry, inode);

	/* Close the race of the process dying before we return the dentry */
	if (tid_fd_revalidate(dentry, 0))
		return 0;
 out:
	return -ENOENT;
}

static struct dentry *
proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}

static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
{
	return proc_readfd_common(file, ctx,
				  proc_fdinfo_instantiate);
}

const struct inode_operations proc_fdinfo_inode_operations = {
	.lookup		= proc_lookupfdinfo,
	.setattr	= proc_setattr,
};

const struct file_operations proc_fdinfo_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_readfdinfo,
	.llseek		= default_llseek,
};
gpl-2.0
EdwinMoq/android_kernel_lge_omap4-common
arch/arm/mach-omap2/pm24xx.c
303
12616
/*
 * OMAP2 Power Management Routines
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * Written by:
 * Richard Woodruff <r-woodruff2@ti.com>
 * Tony Lindgren
 * Juha Yrjola
 * Amit Kucheria <amit.kucheria@nokia.com>
 * Igor Stoppa <igor.stoppa@nokia.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
#include <linux/console.h>

#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>

#include <mach/irqs.h>

#include <plat/clock.h>
#include <plat/sram.h>
#include <plat/dma.h>
#include <plat/board.h>

#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "pm.h"
#include "control.h"

#include "powerdomain.h"
#include "clockdomain.h"

#ifdef CONFIG_SUSPEND
/* Tracks the state requested through the platform_suspend_ops callbacks */
static suspend_state_t suspend_state = PM_SUSPEND_ON;
static inline bool is_suspending(void)
{
	return (suspend_state != PM_SUSPEND_ON);
}
#else
static inline bool is_suspending(void)
{
	return false;
}
#endif

/* SRAM-resident idle/suspend routines, installed by omap2_pm_init() */
static void (*omap2_sram_idle)(void);
static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,
				  void __iomem *sdrc_power);

static struct powerdomain *mpu_pwrdm, *core_pwrdm;
static struct clockdomain *dsp_clkdm, *mpu_clkdm, *wkup_clkdm, *gfx_clkdm;

static struct clk *osc_ck, *emul_ck;

/*
 * Return non-zero if any CORE functional clock (other than the UARTs,
 * which the serial core idles separately) is still enabled.
 */
static int omap2_fclks_active(void)
{
	u32 f1, f2;

	f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);

	/* Ignore UART clocks.  These are handled by UART core (serial.c) */
	f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
	f2 &= ~OMAP24XX_EN_UART3_MASK;

	if (f1 | f2)
		return 1;

	return 0;
}

/*
 * Put both MPU and CORE powerdomains into retention via the SRAM suspend
 * routine. Clears stale wake-up status first, re-arms wake sources and
 * acknowledges PRCM-to-MPU interrupts on the way out.
 */
static void omap2_enter_full_retention(void)
{
	u32 l;
	struct timespec ts_preidle, ts_postidle, ts_idle;

	/* There is 1 reference hold for all children of the oscillator
	 * clock, the following will remove it. If no one else uses the
	 * oscillator itself it will be disabled if/when we enter retention
	 * mode.
	 */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	/* REVISIT: These write to reserved bits? */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround to kill USB */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	if (omap2_pm_debug) {
		omap2_pm_dump(0, 0, 0);
		getnstimeofday(&ts_preidle);
	}

	/* One last check for pending IRQs to avoid extra latency due
	 * to sleeping unnecessarily. */
	if (omap_irq_pending())
		goto no_sleep;

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

	omap_uart_resume_idle();

no_sleep:
	if (omap2_pm_debug) {
		unsigned long long tmp;

		getnstimeofday(&ts_postidle);
		ts_idle = timespec_sub(ts_postidle, ts_preidle);
		tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
		omap2_pm_dump(0, 1, tmp);
	}
	omap2_gpio_resume_after_idle(0);

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
				  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
				  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
}

/* Non-zero while either I2C controller's functional clock is running */
static int omap2_i2c_active(void)
{
	u32 l;

	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK);
}

static int sti_console_enabled;

/*
 * MPU retention is only allowed when no CORE peripheral that cannot
 * wake the MPU from retention is active (MMC, UARTs, McSPI, DSS, STI).
 */
static int omap2_allow_mpu_retention(void)
{
	u32 l;

	/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
		 OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
		 OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
		return 0;
	/* Check for UART3. */
	l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
	if (l & OMAP24XX_EN_UART3_MASK)
		return 0;
	if (sti_console_enabled)
		return 0;

	return 1;
}

/*
 * Idle the MPU: enter MPU retention when allowed, otherwise just WFI
 * with retention blocked. Skipped entirely while an I2C transfer runs.
 */
static void omap2_enter_mpu_retention(void)
{
	int only_idle = 0;
	struct timespec ts_preidle, ts_postidle, ts_idle;

	/* Putting MPU into the WFI state while a transfer is active
	 * seems to cause the I2C block to timeout. Why? Good question. */
	if (omap2_i2c_active())
		return;

	/* The peripherals seem not to be able to wake up the MPU when
	 * it is in retention mode. */
	if (omap2_allow_mpu_retention()) {
		/* REVISIT: These write to reserved bits? */
		omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
		omap2_prm_write_mod_reg(0xffffffff, CORE_MOD,
					OMAP24XX_PM_WKST2);
		omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

		/* Try to enter MPU retention */
		omap2_prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
				  OMAP_LOGICRETSTATE_MASK,
				  MPU_MOD, OMAP2_PM_PWSTCTRL);
	} else {
		/* Block MPU retention */

		omap2_prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
						 OMAP2_PM_PWSTCTRL);
		only_idle = 1;
	}

	if (omap2_pm_debug) {
		omap2_pm_dump(only_idle ? 2 : 1, 0, 0);
		getnstimeofday(&ts_preidle);
	}

	omap2_sram_idle();

	if (omap2_pm_debug) {
		unsigned long long tmp;

		getnstimeofday(&ts_postidle);
		ts_idle = timespec_sub(ts_postidle, ts_preidle);
		tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
		omap2_pm_dump(only_idle ? 2 : 1, 1, tmp);
	}
}

/*
 * Full-chip retention is possible only when no CORE fclk runs, the
 * oscillator has no other user, and no DMA transfer is in flight.
 */
static int omap2_can_sleep(void)
{
	if (omap2_fclks_active())
		return 0;
	if (osc_ck->usecount > 1)
		return 0;
	if (omap_dma_running())
		return 0;

	return 1;
}

/* pm_idle hook: choose between MPU-only retention and full retention */
static void omap2_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (!omap2_can_sleep()) {
		if (omap_irq_pending())
			goto out;
		omap2_enter_mpu_retention();
		goto out;
	}

	if (omap_irq_pending())
		goto out;

	omap2_enter_full_retention();

out:
	local_fiq_enable();
	local_irq_enable();
}

#ifdef CONFIG_SUSPEND
static int omap2_pm_begin(suspend_state_t state)
{
	disable_hlt();
	suspend_state = state;
	return 0;
}

/*
 * System suspend: mask GPT1 (the tick source) as a wake event so only
 * real wake sources can end the suspend, then enter full retention.
 * NOTE(review): wken_wkup is re-written unmodified (GPT1 still cleared)
 * on resume — matches upstream, but looks like GPT1 wake is restored
 * elsewhere; confirm.
 */
static int omap2_pm_suspend(void)
{
	u32 wken_wkup, mir1;

	wken_wkup = omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
	wken_wkup &= ~OMAP24XX_EN_GPT1_MASK;
	omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);

	/* Mask GPT1 */
	mir1 = omap_readl(0x480fe0a4);
	omap_writel(1 << 5, 0x480fe0ac);

	omap2_enter_full_retention();

	omap_writel(mir1, 0x480fe0a4);
	omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);

	return 0;
}

static int omap2_pm_enter(suspend_state_t state)
{
	int ret = 0;

	switch (state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap2_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static void omap2_pm_end(void)
{
	suspend_state = PM_SUSPEND_ON;
	enable_hlt();
}

static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap2_pm_begin,
	.enter		= omap2_pm_enter,
	.end		= omap2_pm_end,
	.valid		= suspend_valid_only_mem,
};
#else
static const struct platform_suspend_ops __initdata omap_pm_ops;
#endif /* CONFIG_SUSPEND */

/* XXX This function should be shareable between OMAP2xxx and OMAP3 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		clkdm_sleep(clkdm);
	return 0;
}

/*
 * One-time PRCM setup: program powerdomain target states, force DSP/GFX
 * off, enable hardware-supervised clockdomain idle and configure the
 * voltage/clock-setup timings and wake-up events.
 */
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/*
	 * Enable autoidle
	 * XXX This should be handled by hwmod code or PRCM init code
	 */
	omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
				OMAP2_PRCM_SYSCONFIG_OFFSET);

	/*
	 * Set CORE powerdomain memory banks to retain their contents
	 * during RETENTION
	 */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);

	/* Set CORE powerdomain's next power state to RETENTION */
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */

	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	clkdm_sleep(dsp_clkdm);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	clkdm_sleep(gfx_clkdm);

	/* Enable hardware-supervised idle for all clkdms */
	clkdm_for_each(clkdms_setup, NULL);
	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);

	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
	 * stabilisation */
	omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_VOLTSETUP_OFFSET);
	omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
				(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
				OMAP24XX_MEMRETCTRL_MASK |
				(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
				(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
				OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events */
	omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
				WKUP_MOD, PM_WKEN);
}

/*
 * Late-init entry point: look up power/clock domains, copy the SRAM
 * idle/suspend routines into SRAM, register suspend ops and install
 * omap2_pm_idle as the idle loop.
 */
static int __init omap2_pm_init(void)
{
	u32 l;

	if (!cpu_is_omap24xx())
		return -ENODEV;

	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
	printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);

	/* Look up important powerdomains */

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (!mpu_pwrdm)
		pr_err("PM: mpu_pwrdm not found\n");

	core_pwrdm = pwrdm_lookup("core_pwrdm");
	if (!core_pwrdm)
		pr_err("PM: core_pwrdm not found\n");

	/* Look up important clockdomains */

	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	if (!mpu_clkdm)
		pr_err("PM: mpu_clkdm not found\n");

	wkup_clkdm = clkdm_lookup("wkup_clkdm");
	if (!wkup_clkdm)
		pr_err("PM: wkup_clkdm not found\n");

	dsp_clkdm = clkdm_lookup("dsp_clkdm");
	if (!dsp_clkdm)
		pr_err("PM: dsp_clkdm not found\n");

	gfx_clkdm = clkdm_lookup("gfx_clkdm");
	if (!gfx_clkdm)
		pr_err("PM: gfx_clkdm not found\n");

	osc_ck = clk_get(NULL, "osc_ck");
	if (IS_ERR(osc_ck)) {
		printk(KERN_ERR "could not get osc_ck\n");
		return -ENODEV;
	}

	if (cpu_is_omap242x()) {
		emul_ck = clk_get(NULL, "emul_ck");
		if (IS_ERR(emul_ck)) {
			printk(KERN_ERR "could not get emul_ck\n");
			clk_put(osc_ck);
			return -ENODEV;
		}
	}

	prcm_setup_regs();

	/* Hack to prevent MPU retention when STI console is enabled. */
	{
		const struct omap_sti_console_config *sti;

		sti = omap_get_config(OMAP_TAG_STI_CONSOLE,
				      struct omap_sti_console_config);
		if (sti != NULL && sti->enable)
			sti_console_enabled = 1;
	}

	/*
	 * We copy the assembler sleep/wakeup routines to SRAM.
	 * These routines need to be in SRAM as that's the only
	 * memory the MPU can see when it wakes up.
	 */
	if (cpu_is_omap24xx()) {
		omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
						 omap24xx_idle_loop_suspend_sz);

		omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
						    omap24xx_cpu_suspend_sz);
	}

	suspend_set_ops(&omap_pm_ops);

	pm_idle = omap2_pm_idle;

	omap_pm_is_ready_status = true;

	return 0;
}

late_initcall(omap2_pm_init);
gpl-2.0
Phoenix-Silver/ZTE-Blade-2.6.35.10
drivers/s390/block/dasd_eckd.c
559
99561
/* * File...........: linux/drivers/s390/block/dasd_eckd.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * Copyright IBM Corp. 1999, 2009 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008 * Author.........: Nigel Hislop <hislop_nigel@emc.com> */ #define KMSG_COMPONENT "dasd-eckd" #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/bio.h> #include <linux/module.h> #include <linux/init.h> #include <asm/debug.h> #include <asm/idals.h> #include <asm/ebcdic.h> #include <asm/compat.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/cio.h> #include <asm/ccwdev.h> #include <asm/itcw.h> #include "dasd_int.h" #include "dasd_eckd.h" #include "../cio/chsc.h" #ifdef PRINTK_HEADER #undef PRINTK_HEADER #endif /* PRINTK_HEADER */ #define PRINTK_HEADER "dasd(eckd):" #define ECKD_C0(i) (i->home_bytes) #define ECKD_F(i) (i->formula) #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\ (i->factors.f_0x02.f1)) #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\ (i->factors.f_0x02.f2)) #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\ (i->factors.f_0x02.f3)) #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0) #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0) #define ECKD_F6(i) (i->factor6) #define ECKD_F7(i) (i->factor7) #define ECKD_F8(i) (i->factor8) MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_eckd_discipline; /* The ccw bus type uses this table to find devices that it sends to * dasd_eckd_probe */ static struct ccw_device_id dasd_eckd_ids[] = { { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, { 
CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7}, { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8}, { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9}, { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa}, { /* end of list */ }, }; MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids); static struct ccw_driver dasd_eckd_driver; /* see below */ #define INIT_CQR_OK 0 #define INIT_CQR_UNFORMATTED 1 #define INIT_CQR_ERROR 2 /* initial attempt at a probe function. this can be simplified once * the other detection code is gone */ static int dasd_eckd_probe (struct ccw_device *cdev) { int ret; /* set ECKD specific ccw-device options */ ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE | CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH); if (ret) { DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "dasd_eckd_probe: could not set " "ccw-device options"); return ret; } ret = dasd_generic_probe(cdev, &dasd_eckd_discipline); return ret; } static int dasd_eckd_set_online(struct ccw_device *cdev) { return dasd_generic_set_online(cdev, &dasd_eckd_discipline); } static const int sizes_trk0[] = { 28, 148, 84 }; #define LABEL_SIZE 140 static inline unsigned int round_up_multiple(unsigned int no, unsigned int mult) { int rem = no % mult; return (rem ? 
no - rem + mult : no); } static inline unsigned int ceil_quot(unsigned int d1, unsigned int d2) { return (d1 + (d2 - 1)) / d2; } static unsigned int recs_per_track(struct dasd_eckd_characteristics * rdc, unsigned int kl, unsigned int dl) { int dn, kn; switch (rdc->dev_type) { case 0x3380: if (kl) return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) + ceil_quot(dl + 12, 32)); else return 1499 / (15 + ceil_quot(dl + 12, 32)); case 0x3390: dn = ceil_quot(dl + 6, 232) + 1; if (kl) { kn = ceil_quot(kl + 6, 232) + 1; return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) + 9 + ceil_quot(dl + 6 * dn, 34)); } else return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34)); case 0x9345: dn = ceil_quot(dl + 6, 232) + 1; if (kl) { kn = ceil_quot(kl + 6, 232) + 1; return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) + ceil_quot(dl + 6 * dn, 34)); } else return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34)); } return 0; } static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) { geo->cyl = (__u16) cyl; geo->head = cyl >> 16; geo->head <<= 4; geo->head |= head; } static int check_XRC (struct ccw1 *de_ccw, struct DE_eckd_data *data, struct dasd_device *device) { struct dasd_eckd_private *private; int rc; private = (struct dasd_eckd_private *) device->private; if (!private->rdc_data.facilities.XRC_supported) return 0; /* switch on System Time Stamp - needed for XRC Support */ data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ rc = get_sync_clock(&data->ep_sys_time); /* Ignore return code if sync clock is switched off. 
*/ if (rc == -ENOSYS || rc == -EACCES) rc = 0; de_ccw->count = sizeof(struct DE_eckd_data); de_ccw->flags |= CCW_FLAG_SLI; return rc; } static int define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk, unsigned int totrk, int cmd, struct dasd_device *device) { struct dasd_eckd_private *private; u32 begcyl, endcyl; u16 heads, beghead, endhead; int rc = 0; private = (struct dasd_eckd_private *) device->private; ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; ccw->flags = 0; ccw->count = 16; ccw->cda = (__u32) __pa(data); memset(data, 0, sizeof(struct DE_eckd_data)); switch (cmd) { case DASD_ECKD_CCW_READ_HOME_ADDRESS: case DASD_ECKD_CCW_READ_RECORD_ZERO: case DASD_ECKD_CCW_READ: case DASD_ECKD_CCW_READ_MT: case DASD_ECKD_CCW_READ_CKD: case DASD_ECKD_CCW_READ_CKD_MT: case DASD_ECKD_CCW_READ_KD: case DASD_ECKD_CCW_READ_KD_MT: case DASD_ECKD_CCW_READ_COUNT: data->mask.perm = 0x1; data->attributes.operation = private->attrib.operation; break; case DASD_ECKD_CCW_WRITE: case DASD_ECKD_CCW_WRITE_MT: case DASD_ECKD_CCW_WRITE_KD: case DASD_ECKD_CCW_WRITE_KD_MT: data->mask.perm = 0x02; data->attributes.operation = private->attrib.operation; rc = check_XRC (ccw, data, device); break; case DASD_ECKD_CCW_WRITE_CKD: case DASD_ECKD_CCW_WRITE_CKD_MT: data->attributes.operation = DASD_BYPASS_CACHE; rc = check_XRC (ccw, data, device); break; case DASD_ECKD_CCW_ERASE: case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: case DASD_ECKD_CCW_WRITE_RECORD_ZERO: data->mask.perm = 0x3; data->mask.auth = 0x1; data->attributes.operation = DASD_BYPASS_CACHE; rc = check_XRC (ccw, data, device); break; default: dev_err(&device->cdev->dev, "0x%x is not a known command\n", cmd); break; } data->attributes.mode = 0x3; /* ECKD */ if ((private->rdc_data.cu_type == 0x2105 || private->rdc_data.cu_type == 0x2107 || private->rdc_data.cu_type == 0x1750) && !(private->uses_cdl && trk < 2)) data->ga_extended |= 0x40; /* Regular Data Format Mode */ heads = private->rdc_data.trk_per_cyl; begcyl = trk / 
heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}

/*
 * Enable XRC (extended remote copy) support in an already filled-in
 * prefix CCW area, if the control unit reports the XRC facility.
 * Sets the 'Time Stamp Valid' and 'Extended Parameter' bits and stores
 * the current TOD clock in the define-extent parameters.
 * Returns 0 when XRC is unsupported or the time stamp was set up;
 * otherwise the error from get_sync_clock().
 */
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}

/*
 * Fill in a Locate Record Extended (LRE) parameter block for the given
 * ECKD command.  The sector (rotational position) is derived from the
 * record number and record length using the device-type specific
 * geometry formulas for 3390/3380 devices.
 */
static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Build a Prefix CCW (PFX) with embedded Define Extent and, for
 * format == 1, Locate Record Extended data.  The define-extent mask
 * and cache attributes are chosen per command; write commands also
 * arm XRC time stamping via check_XRC_on_prefix().  When the start
 * device is a PAV alias, base-device verification bits are set.
 * Returns 0 on success or a negative errno (unknown format/opcode,
 * clock error).
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extent data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	/*
	 * Regular Data Format Mode is only valid on ESS-type control
	 * units (2105/2107/1750) and must not be used on the CDL
	 * special tracks 0 and 1.
	 */
	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750) &&
	    !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	if (format == 1) {
		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
			      basedev, blksize, tlf);
	}

	return rc;
}

/*
 * Convenience wrapper around prefix_LRE() for the basic format 0
 * prefix (define extent only, no locate record data).
 */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}

/*
 * Fill in a classic Locate Record CCW and its parameter block.
 * Mirrors fill_LRE_data() for the non-extended locate record format;
 * unknown opcodes are only logged here, not BUG()ed.
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}

/*
 * Generate device unique id that specifies the physical device.
 * Built from the NED (vendor/serial, converted from EBCDIC to ASCII),
 * the GNEQ subsystem id and, if present, the SNEQ alias/base
 * information and the virtual device unit information table.
 * The uid is updated under the ccw device lock.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_uid *uid;
	int count;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	uid = &private->uid;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

/*
 * Copy the device uid to *uid under the ccw device lock.
 * Returns -EINVAL if the device has no private data yet.
 */
static int
dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	if (device->private) {
		private = (struct dasd_eckd_private *)device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * Build a single-CCW Read Configuration Data request restricted to
 * the channel path(s) given by lpm.  The caller owns rcd_buffer.
 */
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
						    void *rcd_buffer,
						    struct ciw *ciw, __u8 lpm)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = ciw->cmd;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = ciw->count;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Read configuration data over one channel path.  On success a newly
 * allocated buffer and its size are returned via rcd_buffer and
 * rcd_buffer_size; ownership passes to the caller.  On any failure
 * both output parameters are cleared and an errno is returned
 * (-EOPNOTSUPP when the device offers no RCD command).
 */
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}

	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buf[0] = 0xE5;
	rcd_buf[1] = 0xF1;
	rcd_buf[2] = 0x4B;
	rcd_buf[3] = 0xF0;
	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
	if (IS_ERR(cqr)) {
		ret = PTR_ERR(cqr);
		goto out_error;
	}
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = ciw->count;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);	/* kfree(NULL) is a no-op */
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

/*
 * Scan the raw configuration data for the NED, SNEQ, vd-SNEQ and GNEQ
 * records and cache pointers to them in the private structure.
 * NED and GNEQ are mandatory; without them all pointers are reset and
 * -EINVAL is returned.
 */
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};

/*
 * Extract the path access mode from the GNEQ record of the given
 * configuration data (low three bits of byte 18); returns 0 when no
 * GNEQ record is present.
 */
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

/*
 * Read the configuration data on every operational channel path.
 * The first valid data set is kept in private->conf_data; each path's
 * access mode updates the non-preferred (npm) / preferred (ppm) path
 * masks.  Paths that fail to deliver data are skipped, other errors
 * abort with an errno.
 */
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm;
	struct dasd_eckd_private *private;
	struct dasd_eckd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = (struct dasd_eckd_path *) &private->path_data;
	path_data->opm = ccw_device_get_path_mask(device->cdev);
	lpm = 0x80;
	conf_data_saved = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (lpm & path_data->opm){
			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
						     &conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
						"Read configuration data returned "
						"error %d", rc);
				return rc;
			}
			if (conf_data == NULL) {
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
						"No configuration data "
						"retrieved");
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved) {
				kfree(private->conf_data);
				private->conf_data = conf_data;
				private->conf_len = conf_len;
				if (dasd_eckd_identify_conf_parts(private)) {
					private->conf_data = NULL;
					private->conf_len = 0;
					kfree(conf_data);
					continue;
				}
				conf_data_saved++;
			}
			switch (dasd_eckd_path_access(conf_data, conf_len)) {
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			if (conf_data != private->conf_data)
				kfree(conf_data);
		}
	}
	return 0;
}

/*
 * Read the storage server feature codes via a PSF (Read Subsystem
 * Data, suborder 0x41) / RSSD channel program and cache them in
 * private->features.  A failure is only logged; private->features
 * stays zeroed.
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Build CP for Perform Subsystem Function - SSC.
 * enable_pav adds the PAV bits to the SSC suborder.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Validate storage server of current device.
 * PAV is requested unless disabled globally (dasd_nopav) or running
 * under z/VM; an error from the server is only logged.
 */
static void dasd_eckd_validate_server(struct dasd_device *device)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav);

	/* may be requested feature is not available on server,
	 * therefore just report error and go ahead */
	private = (struct dasd_eckd_private *) device->private;
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
}

/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 * Sets up the private area, reads configuration data, generates the
 * uid, registers the device with the alias/LCU handling, reads feature
 * codes and device characteristics, and allocates the block structure
 * for base devices.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int is_known, rc;
	int readonly;

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	private = (struct dasd_eckd_private *) device->private;
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err1;

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	/*
	 * dasd_eckd_validate_server is done on the first device that
	 * is found for an LCU. All later other devices have to wait
	 * for it, so they will read the correct feature codes.
	 */
	if (!is_known) {
		dasd_eckd_validate_server(device);
		dasd_alias_lcu_setup_complete(device);
	} else
		dasd_alias_wait_for_lcu_setup(device);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}
	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}

/*
 * Undo dasd_eckd_check_characteristics: detach from the LCU and drop
 * the cached configuration data.
 */
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	kfree(private->conf_data);
	private->conf_data = NULL;
}

/*
 * Build the initial analysis channel program: read the count fields of
 * the first 4 records on track 0 and the first record on track 2 into
 * private->count_area, to detect the disk layout later on.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;

	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		else
			return INIT_CQR_ERROR;
	} else
		return INIT_CQR_ERROR;
}

/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the devices has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_eckd_private *private;
	struct dasd_device *device;

	device = init_cqr->startdev;
	private = (struct dasd_eckd_private *) device->private;
	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}

/*
 * Queue the initial analysis request asynchronously and return
 * -EAGAIN; dasd_eckd_analysis_callback() picks up the result.
 */
static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *init_cqr;

	private = (struct dasd_eckd_private *) block->base->private;
	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/* first try without ERP, so we can later handle unformatted
	 * devices as special case
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}

/*
 * Evaluate the count areas read by the analysis request: detect the
 * compatible (CDL) vs. linux disk layout, derive the block size and
 * total block count, or fail with -EMEDIUMTYPE/-EIO for unformatted
 * or unreadable disks.
 */
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	block->blocks = (private->real_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 ((private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}

/*
 * Dispatch to start or end of analysis depending on whether an
 * analysis result is already pending (init_cqr_status >= 0).
 */
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}

/* Register the device with PAV alias management when it goes online. */
static int dasd_eckd_ready_to_online(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
};

/* Deregister from alias management when the device leaves online state. */
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	cancel_work_sync(&device->reload_device);
	return dasd_alias_remove_device(device);
};

/*
 * Fill in the HDIO_GETGEO geometry from the device characteristics;
 * sectors per track is only reported for a supported block size.
 */
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}

/*
 * Build a channel program to format one track according to
 * fdata->intensity (see the bit description below).  Returns the
 * filled request or an ERR_PTR on invalid parameters.
 */
static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
			struct format_data_t * fdata)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	struct ch_t address;
	int cplength, datasize;
	int i;
	int intensity = 0;
	int r0_perm;

	private = (struct dasd_eckd_private *) device->private;
	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
	set_ch_t(&address,
		 fdata->start_unit / private->rdc_data.trk_per_cyl,
		 fdata->start_unit % private->rdc_data.trk_per_cyl);

	/* Sanity checks. */
	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&device->cdev->dev, "Start track number %d used in "
			 "formatting is too big\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&device->cdev->dev, "Start track %d used in "
			 "formatting exceeds end track\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&device->cdev->dev,
			 "The DASD cannot be formatted with block size %d\n",
			 fdata->blksize);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&device->cdev->dev, "An I/O control call used "
			 "incorrect flags 0x%x\n", fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;
	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		/* grant subsystem permission to format R0 */
		if (r0_perm)
			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 0;
		ect->kl = 0;
		ect->dl = 8;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((intensity & ~0x08) & 0x04) {	/* erase track */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 1;
		ect->kl = 0;
		ect->dl = 0;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
	} else {		/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = i + 1;
			ect->kl = 0;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3) {
					ect->kl = 4;
					ect->dl = sizes_trk0[i] - 4;
				}
			}
			if ((intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->kl = 44;
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->startdev = device;
	fcp->memdev = device;
	fcp->retries = 256;
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}

static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	cqr->status = DASD_CQR_FILLED;
	if
(cqr->block && (cqr->startdev != cqr->block->base)) {
		/* Route the retried request back through the base device. */
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
	}
};

/*
 * Pick the error recovery procedure for a failed request, based on the
 * control unit type the request ran against.
 */
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		/* 3990-compatible control units get the full 3990 ERP. */
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}

/* ERP post-processing: ECKD needs nothing beyond the default action. */
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}

/*
 * Handle interrupts that do not belong to a running channel program:
 * state-change pending conditions, summary unit checks, service
 * information messages (SIM) and fake IRBs.
 */
static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
						   struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/* for alias only and not in offline processing*/
		if (!device->block && private->lcu &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
			/*
			 * the state change could be caused by an alias
			 * reassignment remove device from alias handling
			 * to prevent new requests from being scheduled on
			 * the wrong alias device
			 */
			dasd_alias_remove_device(device);

			/* schedule worker to reload device */
			dasd_reload_device(device);
		}

		dasd_generic_handle_state_change(device);
		return;
	}

	/* summary unit check */
	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    (irb->ecw[7] == 0x0D)) {
		dasd_alias_handle_summary_unit_check(device, irb);
		return;
	}

	sense = dasd_get_sense(irb);
	/* service information message SIM */
	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		dasd_schedule_device_bh(device);
		return;
	}

	if ((scsw_cc(&irb->scsw) == 1) &&
	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
	    (scsw_actl(&irb->scsw) &
SCSW_ACTL_START_PEND) &&
	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
		/* fake irb do nothing, they are handled elsewhere */
		dasd_schedule_device_bh(device);
		return;
	}

	if (!sense) {
		/* just report other unsolicited interrupts */
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received");
	} else {
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received "
			      "(sense available)");
		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
	}

	dasd_schedule_device_bh(device);
	return;
};

/*
 * Build a command mode channel program for a block layer request:
 * one define extent (or prefix) CCW, locate record CCW(s), then one
 * read/write CCW per block.  Extra locate records are inserted for
 * the special cdl-formatted blocks on tracks 0 and 1.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len >> (block->s2b_shift + 9);
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Build a command mode channel program that transfers whole tracks
 * (read/write track data), one CCW per track, with IDAWs describing
 * all data areas.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
*/
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;
	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;
	/* on 31-bit we need space for two 32 bit addresses per page
	 * on 64-bit one 64 bit address
	 */
	datasize = sizeof(struct PFX_eckd_data) +
		cidaw * sizeof(unsigned long long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = 0;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		seg_len = bv->bv_len;
		while (seg_len) {
			if (new_track) {
				/* Emit the read/write CCW covering the rest
				 * of the current track. */
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = 0;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Fill in the prefix (PFX with LRE) data for a transport mode (ITCW)
 * request and append it as the first DCW of the ITCW.
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;

	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct
dasd_eckd_private *) startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));

	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata.validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		/* may fail if the clock is not in sync (XRC) */
		rc = check_XRC_on_prefix(&pfxdata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/*
		 * Compute the set-sector value from device geometry.
		 * The constants below are device-type specific; presumably
		 * they come from the 3390/3380 track layout formulas --
		 * TODO confirm against the hardware reference.
		 */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	lredata->auxiliary.length_valid = 1;
	lredata->auxiliary.length_scope = 1;
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	lredata->sector = sector;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);

	return rc;
}

/*
 * Build a transport mode (TCW/ITCW) channel program for a block layer
 * request: a single TCW describes the whole transfer, with one TIDAW
 * per bio segment.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* trackbased I/O needs address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;
	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 100*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->retries = 10;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	/*
	 * NOTE(review): the return value of itcw_init is used unchecked --
	 * presumably it cannot fail for the size computed above; TODO confirm.
	 */
	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	cqr->cpaddr = itcw_get_tcw(itcw);

	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
*/
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
		if (IS_ERR(last_tidaw))
			/*
			 * NOTE(review): this error path returns without
			 * freeing cqr (no dasd_sfree_request) -- looks like
			 * a request leak; confirm against later kernels.
			 */
			return (struct dasd_ccw_req *)last_tidaw;
	}

	/* Mark the last TIDAW of the transfer (TTIC/last flag 0x80). */
	last_tidaw->flags |= 0x80;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Dispatch a block layer request to the most capable channel program
 * builder: transport mode (FCX) if supported, else command mode track
 * I/O, else the single-block command mode fallback.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int tpm, cmdrtd, cmdwtd;
	int use_prefix;
#if defined(CONFIG_64BIT)
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
#endif
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	/* is transport mode supported? */
#if defined(CONFIG_64BIT)
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
#else
	tpm = 0;
#endif

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
		/* -EAGAIN is fatal here; any other error falls back below. */
		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}

/*
 * Tear down a channel program built by dasd_eckd_build_cp: undo the
 * dasd_page_cache copies made by the single-block builder (for reads,
 * copy data back to the bio pages; in all cases free the cache pages)
 * and free the request.  Returns 1 if the request finished with
 * DASD_CQR_DONE, 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_eckd_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record.
*/
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					/* Data went through the page cache:
					 * copy reads back, free the copy. */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}

/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;
	struct tcw *tcw;
	struct tccb *tccb;
	struct dcw *dcw;

	if (cqr->cpmode == 1) {
		/* Transport mode: patch the prefix data inside the TCCB. */
		tcw = cqr->cpaddr;
		tccb = tcw_get_tccb(tcw);
		dcw = (struct dcw *)&tccb->tca[0];
		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	} else {
		/* Command mode: prefix data is the first ccw's data area. */
		ccw = cqr->cpaddr;
		pfxdata = cqr->data;
		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
			pfxdata->validity.verify_base = 0;
			pfxdata->validity.hyper_pav = 0;
		}
	}
}

/* Maximum number of requests in flight per start device. */
#define DASD_ECKD_CHANQ_MAX_SIZE 4

/*
 * Build a channel program for 'req', started on the best available
 * start device for 'base' (possibly a PAV alias).
 */
static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = (struct dasd_eckd_private *) startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);
spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}

/*
 * Counterpart of dasd_eckd_build_alias_cp: drop the per-device request
 * count again, then free the channel program.
 */
static int
dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
			struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = (struct dasd_eckd_private *) cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}

/*
 * Fill in the dasd_information2_t structure for the DASD info ioctl:
 * label position, disk layout and device characteristics data.
 */
static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(struct dasd_eckd_characteristics));
	info->confdata_size = min((unsigned long)private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}

/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
*/
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Steal lock ioctl - unconditional reserve device.
* Builds a channel program to break a device's reservation
 * (unconditional reserve).
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct
dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}

/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}

/*
 * Issue syscall I/O to EMC Symmetrix array.
* CCWs are PSF and RSSD */ static int dasd_symm_io(struct dasd_device *device, void __user *argp) { struct dasd_symmio_parms usrparm; char *psf_data, *rssd_result; struct dasd_ccw_req *cqr; struct ccw1 *ccw; char psf0, psf1; int rc; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) return -EACCES; psf0 = psf1 = 0; /* Copy parms from caller */ rc = -EFAULT; if (copy_from_user(&usrparm, argp, sizeof(usrparm))) goto out; if (is_compat_task() || sizeof(long) == 4) { /* Make sure pointers are sane even on 31 bit. */ rc = -EINVAL; if ((usrparm.psf_data >> 32) != 0) goto out; if ((usrparm.rssd_result >> 32) != 0) goto out; usrparm.psf_data &= 0x7fffffffULL; usrparm.rssd_result &= 0x7fffffffULL; } /* alloc I/O data area */ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); if (!psf_data || !rssd_result) { rc = -ENOMEM; goto out_free; } /* get syscall header from user space */ rc = -EFAULT; if (copy_from_user(psf_data, (void __user *)(unsigned long) usrparm.psf_data, usrparm.psf_data_len)) goto out_free; psf0 = psf_data[0]; psf1 = psf_data[1]; /* setup CCWs for PSF + RSSD */ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate initialization request"); rc = PTR_ERR(cqr); goto out_free; } cqr->startdev = device; cqr->memdev = device; cqr->retries = 3; cqr->expires = 10 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; /* Build the ccws */ ccw = cqr->cpaddr; /* PSF ccw */ ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = usrparm.psf_data_len; ccw->flags |= CCW_FLAG_CC; ccw->cda = (__u32)(addr_t) psf_data; ccw++; /* RSSD ccw */ ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = usrparm.rssd_result_len; ccw->flags = CCW_FLAG_SLI ; ccw->cda = (__u32)(addr_t) rssd_result; rc = dasd_sleep_on(cqr); if (rc) goto out_sfree; rc = -EFAULT; if (copy_to_user((void __user *)(unsigned long) 
usrparm.rssd_result, rssd_result, usrparm.rssd_result_len)) goto out_sfree; rc = 0; out_sfree: dasd_sfree_request(cqr, cqr->memdev); out_free: kfree(rssd_result); kfree(psf_data); out: DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", (int) psf0, (int) psf1, rc); return rc; } static int dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) { struct dasd_device *device = block->base; switch (cmd) { case BIODASDGATTR: return dasd_eckd_get_attrib(device, argp); case BIODASDSATTR: return dasd_eckd_set_attrib(device, argp); case BIODASDPSRD: return dasd_eckd_performance(device, argp); case BIODASDRLSE: return dasd_eckd_release(device); case BIODASDRSRV: return dasd_eckd_reserve(device); case BIODASDSLCK: return dasd_eckd_steal_lock(device); case BIODASDSYMMIO: return dasd_symm_io(device, argp); default: return -ENOIOCTLCMD; } } /* * Dump the range of CCWs into 'page' buffer * and return number of printed chars. */ static int dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) { int len, count; char *datap; len = 0; while (from <= to) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", from, ((int *) from)[0], ((int *) from)[1]); /* get pointer to data (consider IDALs) */ if (from->flags & CCW_FLAG_IDA) datap = (char *) *((addr_t *) (addr_t) from->cda); else datap = (char *) ((addr_t) from->cda); /* dump data (max 32 bytes) */ for (count = 0; count < from->count && count < 32; count++) { if (count % 8 == 0) len += sprintf(page + len, " "); if (count % 4 == 0) len += sprintf(page + len, " "); len += sprintf(page + len, "%02x", datap[count]); } len += sprintf(page + len, "\n"); from++; } return len; } static void dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, char *reason) { u64 *sense; sense = (u64 *) dasd_get_sense(irb); if (sense) { DBF_DEV_EVENT(DBF_EMERG, device, "%s: %s %02x%02x%02x %016llx %016llx %016llx " "%016llx", reason, 
scsw_is_tm(&irb->scsw) ? "t" : "c", scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), sense[0], sense[1], sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); } } /* * Print sense data and related channel program. * Parts are printed because printk buffer is only 1024 bytes. */ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, struct dasd_ccw_req *req, struct irb *irb) { char *page; struct ccw1 *first, *last, *fail, *from, *to; int len, sl, sct; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "No memory to dump sense data\n"); return; } /* dump the sense data */ len = sprintf(page, KERN_ERR PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), scsw_cc(&irb->scsw), req ? req->intrc : 0); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing CCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", irb->ecw[8 * sl + sct]); } len += sprintf(page + len, "\n"); } if (irb->ecw[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ sprintf(page + len, KERN_ERR PRINTK_HEADER " 24 Byte: %x MSG %x, " "%s MSGb to SYSOP\n", irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, irb->ecw[1] & 0x10 ? 
"" : "no"); } else { /* 32 Byte Sense Data */ sprintf(page + len, KERN_ERR PRINTK_HEADER " 32 Byte: Format: %x " "Exception class %x\n", irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); } } else { sprintf(page + len, KERN_ERR PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } printk("%s", page); if (req) { /* req == NULL for unsolicited interrupts */ /* dump the Channel Program (max 140 Bytes per line) */ /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ first = req->cpaddr; for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); to = min(first + 6, last); len = sprintf(page, KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req); dasd_eckd_dump_ccw_range(first, to, page + len); printk("%s", page); /* print failing CCW area (maximum 4) */ /* scsw->cda is either valid or zero */ len = 0; from = ++to; fail = (struct ccw1 *)(addr_t) irb->scsw.cmd.cpa; /* failing CCW */ if (from < fail - 2) { from = fail - 2; /* there is a gap - print header */ len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); } to = min(fail + 1, last); len += dasd_eckd_dump_ccw_range(from, to, page + len); /* print last CCWs (maximum 2) */ from = max(from, ++to); if (from < last - 1) { from = last - 1; /* there is a gap - print header */ len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); } len += dasd_eckd_dump_ccw_range(from, last, page + len); if (len > 0) printk("%s", page); } free_page((unsigned long) page); } /* * Print sense data from a tcw. 
*/ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, struct dasd_ccw_req *req, struct irb *irb) { char *page; int len, sl, sct, residual; struct tsb *tsb; u8 *sense; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, " %s", "No memory to dump sense data"); return; } /* dump the sense data */ len = sprintf(page, KERN_ERR PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " "fcxs: 0x%02X schxs: 0x%02X\n", req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), scsw_cc(&irb->scsw), req->intrc, irb->scsw.tm.fcxs, irb->scsw.tm.schxs); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing TCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.tm.tcw); tsb = NULL; sense = NULL; if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01)) tsb = tcw_get_tsb( (struct tcw *)(unsigned long)irb->scsw.tm.tcw); if (tsb) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->length %d\n", tsb->length); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->flags %x\n", tsb->flags); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->dcw_offset %d\n", tsb->dcw_offset); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->count %d\n", tsb->count); residual = tsb->count - 28; len += sprintf(page + len, KERN_ERR PRINTK_HEADER " residual %d\n", residual); switch (tsb->flags & 0x07) { case 1: /* tsa_iostat */ len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.iostat.dev_time %d\n", tsb->tsa.iostat.dev_time); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.iostat.def_time %d\n", tsb->tsa.iostat.def_time); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.iostat.queue_time %d\n", tsb->tsa.iostat.queue_time); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.iostat.dev_busy_time %d\n", 
tsb->tsa.iostat.dev_busy_time); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.iostat.dev_act_time %d\n", tsb->tsa.iostat.dev_act_time); sense = tsb->tsa.iostat.sense; break; case 2: /* ts_ddpc */ len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.ddpc.rcq: "); for (sl = 0; sl < 16; sl++) { for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", tsb->tsa.ddpc.rcq[sl]); } len += sprintf(page + len, "\n"); } sense = tsb->tsa.ddpc.sense; break; case 3: /* tsa_intrg */ len += sprintf(page + len, KERN_ERR PRINTK_HEADER " tsb->tsa.intrg.: not supportet yet \n"); break; } if (sense) { for (sl = 0; sl < 4; sl++) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", sense[8 * sl + sct]); } len += sprintf(page + len, "\n"); } if (sense[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ sprintf(page + len, KERN_ERR PRINTK_HEADER " 24 Byte: %x MSG %x, " "%s MSGb to SYSOP\n", sense[7] >> 4, sense[7] & 0x0f, sense[1] & 0x10 ? 
"" : "no"); } else { /* 32 Byte Sense Data */ sprintf(page + len, KERN_ERR PRINTK_HEADER " 32 Byte: Format: %x " "Exception class %x\n", sense[6] & 0x0f, sense[22] >> 4); } } else { sprintf(page + len, KERN_ERR PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } } else { sprintf(page + len, KERN_ERR PRINTK_HEADER " SORRY - NO TSB DATA AVAILABLE\n"); } printk("%s", page); free_page((unsigned long) page); } static void dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req *req, struct irb *irb) { if (req && scsw_is_tm(&req->irb.scsw)) dasd_eckd_dump_sense_tcw(device, req, irb); else dasd_eckd_dump_sense_ccw(device, req, irb); } static int dasd_eckd_pm_freeze(struct dasd_device *device) { /* * the device should be disconnected from our LCU structure * on restore we will reconnect it and reread LCU specific * information like PAV support that might have changed */ dasd_alias_remove_device(device); dasd_alias_disconnect_device_from_lcu(device); return 0; } static int dasd_eckd_restore_device(struct dasd_device *device) { struct dasd_eckd_private *private; struct dasd_eckd_characteristics temp_rdc_data; int is_known, rc; struct dasd_uid temp_uid; unsigned long flags; private = (struct dasd_eckd_private *) device->private; /* Read Configuration Data */ rc = dasd_eckd_read_conf(device); if (rc) goto out_err; dasd_eckd_get_uid(device, &temp_uid); /* Generate device unique id */ rc = dasd_eckd_generate_uid(device); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) dev_err(&device->cdev->dev, "The UID of the DASD has " "changed\n"); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (rc) goto out_err; /* register lcu with alias handling, enable PAV if this is a new lcu */ is_known = dasd_alias_make_device_known_to_lcu(device); if (is_known < 0) return is_known; if (!is_known) { dasd_eckd_validate_server(device); dasd_alias_lcu_setup_complete(device); } else 
dasd_alias_wait_for_lcu_setup(device); /* RE-Read Configuration Data */ rc = dasd_eckd_read_conf(device); if (rc) goto out_err; /* Read Feature Codes */ dasd_eckd_read_features(device); /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, &temp_rdc_data, 64); if (rc) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "Read device characteristic failed, rc=%d", rc); goto out_err; } spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); /* add device to alias management */ dasd_alias_add_device(device); return 0; out_err: return -1; } static int dasd_eckd_reload_device(struct dasd_device *device) { struct dasd_eckd_private *private; int rc, old_base; char print_uid[60]; struct dasd_uid uid; unsigned long flags; private = (struct dasd_eckd_private *) device->private; spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); old_base = private->uid.base_unit_addr; spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); /* Read Configuration Data */ rc = dasd_eckd_read_conf(device); if (rc) goto out_err; rc = dasd_eckd_generate_uid(device); if (rc) goto out_err; /* * update unit address configuration and * add device to alias management */ dasd_alias_update_add_device(device); dasd_eckd_get_uid(device, &uid); if (old_base != uid.base_unit_addr) { if (strlen(uid.vduit) > 0) snprintf(print_uid, sizeof(print_uid), "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, uid.ssid, uid.base_unit_addr, uid.vduit); else snprintf(print_uid, sizeof(print_uid), "%s.%s.%04x.%02x", uid.vendor, uid.serial, uid.ssid, uid.base_unit_addr); dev_info(&device->cdev->dev, "An Alias device was reassigned to a new base device " "with UID: %s\n", print_uid); } return 0; out_err: return -1; } static struct ccw_driver dasd_eckd_driver = { .name = "dasd-eckd", .owner = THIS_MODULE, .ids = dasd_eckd_ids, .probe = dasd_eckd_probe, .remove = 
dasd_generic_remove, .set_offline = dasd_generic_set_offline, .set_online = dasd_eckd_set_online, .notify = dasd_generic_notify, .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, .uc_handler = dasd_generic_uc_handler, }; /* * max_blocks is dependent on the amount of storage that is available * in the static io buffer for each device. Currently each device has * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In * addition we have one define extent ccw + 16 bytes of data and one * locate record ccw + 16 bytes of data. That makes: * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum. * We want to fit two into the available memory so that we can immediately * start the next request if one finishes off. That makes 249.5 blocks * for one request. Give a little safety and the result is 240. 
*/ static struct dasd_discipline dasd_eckd_discipline = { .owner = THIS_MODULE, .name = "ECKD", .ebcname = "ECKD", .max_blocks = 240, .check_device = dasd_eckd_check_characteristics, .uncheck_device = dasd_eckd_uncheck_device, .do_analysis = dasd_eckd_do_analysis, .ready_to_online = dasd_eckd_ready_to_online, .online_to_ready = dasd_eckd_online_to_ready, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, .handle_terminated_request = dasd_eckd_handle_terminated_request, .format_device = dasd_eckd_format_device, .erp_action = dasd_eckd_erp_action, .erp_postaction = dasd_eckd_erp_postaction, .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, .build_cp = dasd_eckd_build_alias_cp, .free_cp = dasd_eckd_free_alias_cp, .dump_sense = dasd_eckd_dump_sense, .dump_sense_dbf = dasd_eckd_dump_sense_dbf, .fill_info = dasd_eckd_fill_info, .ioctl = dasd_eckd_ioctl, .freeze = dasd_eckd_pm_freeze, .restore = dasd_eckd_restore_device, .reload = dasd_eckd_reload_device, .get_uid = dasd_eckd_get_uid, }; static int __init dasd_eckd_init(void) { int ret; ASCEBC(dasd_eckd_discipline.ebcname, 4); ret = ccw_driver_register(&dasd_eckd_driver); if (!ret) wait_for_device_probe(); return ret; } static void __exit dasd_eckd_cleanup(void) { ccw_driver_unregister(&dasd_eckd_driver); } module_init(dasd_eckd_init); module_exit(dasd_eckd_cleanup);
gpl-2.0
jlyo/android_kernel_samsung_ypg1
drivers/message/fusion/mptctl.c
559
87576
/* * linux/drivers/message/fusion/mptctl.c * mpt Ioctl driver. * For use with LSI PCI chip/adapters * running LSI Fusion MPT (Message Passing Technology) firmware. * * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NO WARRANTY THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. 
DISCLAIMER OF LIABILITY NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> /* for mdelay */ #include <linux/miscdevice.h> #include <linux/smp_lock.h> #include <linux/compat.h> #include <asm/io.h> #include <asm/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation" #define MODULEAUTHOR "LSI Corporation" #include "mptbase.h" #include "mptctl.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT misc device (ioctl) driver" #define my_VERSION MPT_LINUX_VERSION_COMMON #define MYNAM "mptctl" MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION(my_VERSION); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS; static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ struct buflist { u8 *kptr; int len; }; /* * Function prototypes. Called from OS entry point mptctl_ioctl. * arg contents specific to function. */ static int mptctl_fw_download(unsigned long arg); static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); static int mptctl_gettargetinfo(unsigned long arg); static int mptctl_readtest(unsigned long arg); static int mptctl_mpt_command(unsigned long arg); static int mptctl_eventquery(unsigned long arg); static int mptctl_eventenable(unsigned long arg); static int mptctl_eventreport(unsigned long arg); static int mptctl_replace_fw(unsigned long arg); static int mptctl_do_reset(unsigned long arg); static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); static int mptctl_hp_targetinfo(unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); #ifdef CONFIG_COMPAT static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); #endif /* * Private function calls. */ static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc); /* * Reset Handler cleanup function */ static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); /* * Event Handler function */ static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); static struct fasync_struct *async_queue=NULL; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Scatter gather list (SGL) sizes and limits... 
*/ //#define MAX_SCSI_FRAGS 9 #define MAX_FRAGS_SPILL1 9 #define MAX_FRAGS_SPILL2 15 #define FRAGS_PER_BUCKET (MAX_FRAGS_SPILL2 + 1) //#define MAX_CHAIN_FRAGS 64 //#define MAX_CHAIN_FRAGS (15+15+15+16) #define MAX_CHAIN_FRAGS (4 * MAX_FRAGS_SPILL2 + 1) // Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each) // Works out to: 592d bytes! (9+1)*8 + 4*(15+1)*8 // ^----------------- 80 + 512 #define MAX_SGL_BYTES ((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8) /* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */ #define MAX_KMALLOC_SZ (128*1024) #define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptctl_syscall_down - Down the MPT adapter syscall semaphore. * @ioc: Pointer to MPT adapter * @nonblock: boolean, non-zero if O_NONBLOCK is set * * All of the ioctl commands can potentially sleep, which is illegal * with a spinlock held, thus we perform mutual exclusion here. * * Returns negative errno on error, or zero for success. */ static inline int mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) { int rc = 0; if (nonblock) { if (!mutex_trylock(&ioc->ioctl_cmds.mutex)) rc = -EAGAIN; } else { if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex)) rc = -ERESTARTSYS; } return rc; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * This is the callback for any message we have posted. The message itself * will be returned to the message pool when we return from the IRQ * * This runs in irq context so be short and sweet. */ static int mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { char *sense_data; int req_index; int sz; if (!req) return 0; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function " "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function, req, reply)); /* * Handling continuation of the same reply. 
Processing the first * reply, and eating the other replys that come later. */ if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext) goto out_continuation; ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; if (!reply) goto out; ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID; sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength); memcpy(ioc->ioctl_cmds.reply, reply, sz); if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name, le16_to_cpu(reply->u.reply.IOCStatus), le32_to_cpu(reply->u.reply.IOCLogInfo))); if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) || (req->u.hdr.Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "scsi_status (0x%02x), scsi_state (0x%02x), " "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, reply->u.sreply.SCSIStatus, reply->u.sreply.SCSIState, le16_to_cpu(reply->u.sreply.TaskTag), le32_to_cpu(reply->u.sreply.TransferCount))); if (reply->u.sreply.SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { sz = req->u.scsireq.SenseBufferLength; req_index = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); memcpy(ioc->ioctl_cmds.sense, sense_data, sz); ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID; } } out: /* We are done, issue wake up */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) mpt_clear_taskmgmt_in_progress_flag(ioc); ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; complete(&ioc->ioctl_cmds.done); } out_continuation: if (reply && (reply->u.reply.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) return 0; return 1; } static int mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { if (!mf) return 0; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt 
completed (mf=%p, mr=%p)\n", ioc->name, mf, mr)); ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; if (!mr) goto out; ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; memcpy(ioc->taskmgmt_cmds.reply, mr, min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); out: if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { mpt_clear_taskmgmt_in_progress_flag(ioc); ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; complete(&ioc->taskmgmt_cmds.done); return 1; } return 0; } static int mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id) { MPT_FRAME_HDR *mf; SCSITaskMgmt_t *pScsiTm; SCSITaskMgmtReply_t *pScsiTmReply; int ii; int retval; unsigned long timeout; unsigned long time_count; u16 iocstatus; mutex_lock(&ioc->taskmgmt_cmds.mutex); if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { mutex_unlock(&ioc->taskmgmt_cmds.mutex); return -EPERM; } retval = 0; mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc); if (mf == NULL) { dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n", ioc->name)); mpt_clear_taskmgmt_in_progress_flag(ioc); retval = -ENOMEM; goto tm_done; } dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", ioc->name, mf)); pScsiTm = (SCSITaskMgmt_t *) mf; memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; pScsiTm->TaskType = tm_type; if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) && (ioc->bus_type == FC)) pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; pScsiTm->TargetID = target_id; pScsiTm->Bus = bus_id; pScsiTm->ChainOffset = 0; pScsiTm->Reserved = 0; pScsiTm->Reserved1 = 0; pScsiTm->TaskMsgContext = 0; for (ii= 0; ii < 8; ii++) pScsiTm->LUN[ii] = 0; for (ii=0; ii < 7; ii++) pScsiTm->Reserved2[ii] = 0; switch (ioc->bus_type) { case FC: timeout = 40; break; case SAS: timeout = 30; break; case SPI: default: timeout = 10; break; } dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n", ioc->name, 
tm_type, timeout)); INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) time_count = jiffies; if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf); else { retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc, sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP); if (retval != 0) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "TaskMgmt send_handshake FAILED!" " (ioc %p, mf %p, rc=%d) \n", ioc->name, ioc, mf, retval)); mpt_free_msg_frame(ioc, mf); mpt_clear_taskmgmt_in_progress_flag(ioc); goto tm_done; } } /* Now wait for the command to complete */ ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt failed\n", ioc->name)); mpt_free_msg_frame(ioc, mf); mpt_clear_taskmgmt_in_progress_flag(ioc); if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) retval = 0; else retval = -1; /* return failure */ goto tm_done; } if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt failed\n", ioc->name)); retval = -1; /* return failure */ goto tm_done; } pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, " "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, " "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus, pScsiTmReply->TargetID, tm_type, le16_to_cpu(pScsiTmReply->IOCStatus), le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode, le32_to_cpu(pScsiTmReply->TerminationCount))); iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED || iocstatus == MPI_IOCSTATUS_SUCCESS) retval = 0; else { dtmprintk(ioc, 
printk(MYIOC_s_DEBUG_FMT "TaskMgmt failed\n", ioc->name)); retval = -1; /* return failure */ } tm_done: mutex_unlock(&ioc->taskmgmt_cmds.mutex); CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) return retval; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* mptctl_timeout_expired * * Expecting an interrupt, however timed out. * */ static void mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) { unsigned long flags; int ret_val = -1; SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf; u8 function = mf->u.hdr.Function; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n", ioc->name, __func__)); if (mpt_fwfault_debug) mpt_halt_firmware(ioc); spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) mpt_free_msg_frame(ioc, mf); return; } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) if (ioc->bus_type == SAS) { if (function == MPI_FUNCTION_SCSI_IO_REQUEST) ret_val = mptctl_do_taskmgmt(ioc, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, scsi_req->Bus, scsi_req->TargetID); else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ret_val = mptctl_do_taskmgmt(ioc, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, scsi_req->Bus, 0); if (!ret_val) return; } else { if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) || (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) ret_val = mptctl_do_taskmgmt(ioc, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, scsi_req->Bus, 0); if (!ret_val) return; } dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n", ioc->name)); mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); mpt_free_msg_frame(ioc, mf); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* mptctl_ioc_reset * * Clean-up functionality. Used only if there has been a * reload of the FW due. 
* */ static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { switch(reset_phase) { case MPT_IOC_SETUP_RESET: dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); break; case MPT_IOC_PRE_RESET: dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); break; case MPT_IOC_POST_RESET: dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET; complete(&ioc->ioctl_cmds.done); } break; default: break; } return 1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* ASYNC Event Notification Support */ static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { u8 event; event = le32_to_cpu(pEvReply->Event) & 0xFF; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n", ioc->name, __func__)); if(async_queue == NULL) return 1; /* Raise SIGIO for persistent events. 
	 * TODO - this define is not in MPI spec yet,
	 * but they plan to set it to 0x21
	 */
	if (event == 0x21 ) {
		ioc->aen_event_read_flag=1;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n",
		    ioc->name));
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Raised SIGIO to application\n", ioc->name));
		kill_fasync(&async_queue, SIGIO, POLL_IN);
		return 1;
	}

	/* This flag is set after SIGIO was raised, and
	 * remains set until the application has read
	 * the event log via ioctl=MPTEVENTREPORT
	 */
	if(ioc->aen_event_read_flag)
		return 1;

	/* Signal only for the events that are
	 * requested for by the application
	 */
	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
		ioc->aen_event_read_flag=1;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Raised SIGIO to application\n", ioc->name));
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Raised SIGIO to application\n", ioc->name));
		kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
	return 1;
}

/* fasync file op: (re)arms asynchronous notification for the char device.
 * Clears every adapter's "SIGIO already raised" latch so the next event
 * signals again.
 * NOTE(review): uses the Big Kernel Lock (lock_kernel/unlock_kernel);
 * this API was removed in later kernels - confirm target kernel version.
 */
static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
	MPT_ADAPTER	*ioc;
	int ret;

	lock_kernel();
	list_for_each_entry(ioc, &ioc_list, list)
		ioc->aen_event_read_flag=0;
	ret = fasync_helper(fd, filep, mode, &async_queue);
	unlock_kernel();
	return ret;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  MPT ioctl handler
 *  cmd - specify the particular IOCTL command to be issued
 *  arg - data specific to the command. Must not be null.
 */
/* Core ioctl dispatcher.  Validates the user-supplied header, resolves
 * the target adapter, answers the purely-informational commands directly,
 * and otherwise falls through (still holding no lock here) to the
 * interrupt-driven commands handled after mptctl_syscall_down().
 */
static long
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	mpt_ioctl_header __user *uhdr = (void __user *) arg;
	mpt_ioctl_header	 khdr;
	int iocnum;
	unsigned iocnumX;
	int nonblock = (file->f_flags & O_NONBLOCK);
	int ret;
	MPT_ADAPTER *iocp = NULL;

	if (copy_from_user(&khdr, uhdr, sizeof(khdr))) {
		printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - "
		    "Unable to copy mpt_ioctl_header data @ %p\n",
		    __FILE__, __LINE__, uhdr);
		return -EFAULT;
	}
	ret = -ENXIO;				/* (-6) No such device or address */

	/* Verify intended MPT adapter - set iocnum and the adapter
	 * pointer (iocp)
	 */
	iocnumX = khdr.iocnum & 0xFF;
	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
	    (iocp == NULL))
		return -ENODEV;

	if (!iocp->active) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
		    __FILE__, __LINE__);
		return -EFAULT;
	}

	/* Handle those commands that are just returning
	 * information stored in the driver.
	 * These commands should never time out and are unaffected
	 * by TM and FW reloads.
	 */
	if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
		return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
	} else if (cmd == MPTTARGETINFO) {
		return mptctl_gettargetinfo(arg);
	} else if (cmd == MPTTEST) {
		return mptctl_readtest(arg);
	} else if (cmd == MPTEVENTQUERY) {
		return mptctl_eventquery(arg);
	} else if (cmd == MPTEVENTENABLE) {
		return mptctl_eventenable(arg);
	} else if (cmd == MPTEVENTREPORT) {
		return mptctl_eventreport(arg);
	} else if (cmd == MPTFWREPLACE) {
		return mptctl_replace_fw(arg);
	}

	/* All of these commands require an interrupt or
	 * are unknown/illegal.
	 */
	/* Serialize the interrupt-driven commands; mptctl_syscall_down()
	 * takes iocp->ioctl_cmds.mutex (honouring O_NONBLOCK).
	 */
	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
		return ret;

	if (cmd == MPTFWDOWNLOAD)
		ret = mptctl_fw_download(arg);
	else if (cmd == MPTCOMMAND)
		ret = mptctl_mpt_command(arg);
	else if (cmd == MPTHARDRESET)
		ret = mptctl_do_reset(arg);
	else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
		ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
	else if (cmd == HP_GETTARGETINFO)
		ret = mptctl_hp_targetinfo(arg);
	else
		ret = -EINVAL;

	mutex_unlock(&iocp->ioctl_cmds.mutex);

	return ret;
}

/* unlocked_ioctl entry point: wraps the dispatcher in the Big Kernel Lock.
 * NOTE(review): BKL was removed in later kernels - confirm target version.
 */
static long
mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;
	lock_kernel();
	ret = __mptctl_ioctl(file, cmd, arg);
	unlock_kernel();
	return ret;
}

/* MPTHARDRESET handler: copies the request header from user space,
 * resolves the adapter and triggers a hard reset.
 * Returns 0 on success, -EFAULT/-ENODEV on bad input, -1 if the reset
 * itself fails.
 */
static int mptctl_do_reset(unsigned long arg)
{
	struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
	struct mpt_ioctl_diag_reset krinfo;
	MPT_ADAPTER		*iocp;

	if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
				"Unable to copy mpt_ioctl_diag_reset struct @ %p\n",
				__FILE__, __LINE__, urinfo);
		return -EFAULT;
	}

	if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
		printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
				__FILE__, __LINE__, krinfo.hdr.iocnum);
		return -ENODEV; /* (-6) No such device or address */
	}

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
	    iocp->name));

	if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) {
		printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n",
			iocp->name, __FILE__, __LINE__);
		return -1;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT FW download function.  Cast the arg into the mpt_fw_xfer structure.
 * This structure contains: iocnum, firmware length (bytes),
 *      pointer to user space memory where the fw image is stored.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-ENXIO  if no such device
 *		-EAGAIN if resource problem
 *		-ENOMEM if no memory for SGE
 *		-EMLINK if too many chain buffers required
 *		-EBADRQC if adapter does not support FW download
 *		-EBUSY if adapter is busy
 *		-ENOMSG if FW upload returned bad status
 */
/* MPTFWDOWNLOAD handler: thin wrapper that copies the mpt_fw_xfer
 * descriptor from user space and delegates to mptctl_do_fw_download().
 */
static int
mptctl_fw_download(unsigned long arg)
{
	struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
	struct mpt_fw_xfer kfwdl;

	if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) {
		printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - "
				"Unable to copy mpt_fw_xfer struct @ %p\n",
				__FILE__, __LINE__, ufwdl);
		return -EFAULT;
	}

	return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * FW Download engine.
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-ENXIO  if no such device
 *		-EAGAIN if resource problem
 *		-ENOMEM if no memory for SGE
 *		-EMLINK if too many chain buffers required
 *		-EBADRQC if adapter does not support FW download
 *		-EBUSY if adapter is busy
 *		-ENOMSG if FW upload returned bad status
 */
static int
mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
{
	FWDownload_t		*dlmsg;
	MPT_FRAME_HDR		*mf;
	MPT_ADAPTER		*iocp;
	FWDownloadTCSGE_t	*ptsge;
	MptSge_t		*sgl, *sgIn;
	char			*sgOut;
	struct buflist		*buflist;
	struct buflist		*bl;
	dma_addr_t		 sgl_dma;
	int			 ret;
	int			 numfrags = 0;
	int			 maxfrags;
	int			 n = 0;
	u32			 sgdir;
	u32			 nib;
	int			 fw_bytes_copied = 0;
	int			 i;
	int			 sge_offset = 0;
	u16			 iocstat;
	pFWDownloadReply_t	 ReplyMsg = NULL;
	unsigned long		 timeleft;

	if (mpt_verify_adapter(ioc, &iocp) < 0) {
		printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
				 ioc);
		return -ENODEV; /* (-6) No such device or address */
	} else {

		/*  Valid device. Get a message frame and construct the FW download message.
	*/
		if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
			return -EAGAIN;
	}

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
	    "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n",
	    iocp->name, ufwbuf));
	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
	    iocp->name, (int)fwlen));
	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
	    iocp->name, ioc));

	/* Lay out the request frame: MPI header, then the transaction
	 * context SGE, then the data SGEs copied in below (sgOut).
	 */
	dlmsg = (FWDownload_t*) mf;
	ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
	sgOut = (char *) (ptsge + 1);

	/*
	 * Construct f/w download request
	 */
	dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW;
	dlmsg->Reserved = 0;
	dlmsg->ChainOffset = 0;
	dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
	dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
	/* LAST_SEGMENT flag only exists from MPI 1.5 onward. */
	if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
		dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
	else
		dlmsg->MsgFlags = 0;

	/* Set up the Transaction SGE.
	 */
	ptsge->Reserved = 0;
	ptsge->ContextSize = 0;
	ptsge->DetailsLength = 12;
	ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	ptsge->Reserved_0100_Checksum = 0;
	ptsge->ImageOffset = 0;
	ptsge->ImageSize = cpu_to_le32(fwlen);

	/* Add the SGL
	 */

	/*
	 * Need to kmalloc area(s) for holding firmware image bytes.
	 * But we need to do it piece meal, using a proper
	 * scatter gather list (with 128kB MAX hunks).
	 *
	 * A practical limit here might be # of sg hunks that fit into
	 * a single IOC request frame; 12 or 8 (see below), so:
	 *	For FC9xx: 12 x 128kB == 1.5 mB (max)
	 *	For C1030:  8 x 128kB == 1   mB (max)
	 * We could support chaining, but things get ugly(ier:)
	 *
	 * Set the sge_offset to the start of the sgl (bytes).
	 */
	sgdir = 0x04000000;	/* IOC will READ from sys mem */
	sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t);
	if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset,
	    &numfrags, &buflist, &sgl_dma, iocp)) == NULL)
		return -ENOMEM;

	/*
	 * We should only need SGL with 2 simple_32bit entries (up to 256 kB)
	 * for FC9xx f/w image, but calculate max number of sge hunks
	 * we can fit into a request frame, and limit ourselves to that.
	 * (currently no chain support)
	 * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE
	 *	Request		maxfrags
	 *	128		12
	 *	96		8
	 *	64		4
	 */
	maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
			sizeof(FWDownloadTCSGE_t))
			/ iocp->SGE_size;
	if (numfrags > maxfrags) {
		ret = -EMLINK;
		goto fwdl_out;
	}

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n",
	    iocp->name, sgl, numfrags));

	/*
	 * Parse SG list, copying sgl itself,
	 * plus f/w image hunks from user space as we go...
	 */
	ret = -EFAULT;
	sgIn = sgl;
	bl = buflist;
	for (i=0; i < numfrags; i++) {

		/* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE
		 * Skip everything but Simple. If simple, copy from
		 *	user space into kernel space.
		 * Note: we should not have anything but Simple as
		 *	Chain SGE are illegal.
		 */
		nib = (sgIn->FlagsLength & 0x30000000) >> 28;
		if (nib == 0 || nib == 3) {
			;
		} else if (sgIn->Address) {
			iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
			n++;
			if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
				printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
					"Unable to copy f/w buffer hunk#%d @ %p\n",
					iocp->name, __FILE__, __LINE__, n, ufwbuf);
				goto fwdl_out;
			}
			fw_bytes_copied += bl->len;
		}
		sgIn++;
		bl++;
		sgOut += iocp->SGE_size;
	}

	DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);

	/*
	 * Finally, perform firmware download.
	 */
	ReplyMsg = NULL;
	SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context,
	    dlmsg->MsgContext);
	INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
	mpt_put_msg_frame(mptctl_id, iocp, mf);

	/* Now wait for the command to complete */
retry_wait:
	/* 60s timeout per wait; a spurious wakeup with time left retries. */
	timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
	if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		ret = -ETIME;
		printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
		if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			/* IOC reset killed the command; just free the frame. */
			mpt_free_msg_frame(iocp, mf);
			goto fwdl_out;
		}
		if (!timeleft)
			mptctl_timeout_expired(iocp, mf);
		else
			goto retry_wait;
		goto fwdl_out;
	}

	if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
		/* Completed but no reply frame captured. */
		printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
		mpt_free_msg_frame(iocp, mf);
		ret = -ENODATA;
		goto fwdl_out;
	}

	if (sgl)
		kfree_sgl(sgl, sgl_dma, buflist, iocp);

	/* Map the MPI IOCStatus of the reply onto an errno. */
	ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
	iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
	if (iocstat == MPI_IOCSTATUS_SUCCESS) {
		/* NOTE(review): "successfull" is a typo in this log string;
		 * left as-is here, fix separately.
		 */
		printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name);
		return 0;
	} else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) {
		printk(MYIOC_s_WARN_FMT "Hmmm... 
F/W download not supported!?!\n", iocp->name);
		printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n",
		    iocp->name);
		return -EBADRQC;
	} else if (iocstat == MPI_IOCSTATUS_BUSY) {
		printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name);
		printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name);
		return -EBUSY;
	} else {
		printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n",
		    iocp->name, iocstat);
		printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name);
		return -ENOMSG;
	}
	return 0;

fwdl_out:
	/* Common error exit: clear management state and free the SGL. */
	CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
	SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
	kfree_sgl(sgl, sgl_dma, buflist, iocp);
	return ret;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * SGE Allocation routine
 *
 * Inputs:	bytes - number of bytes to be transferred
 *		sgdir - data direction
 *		sge_offset - offset (in bytes) from the start of the request
 *			frame to the first SGE
 *		ioc - pointer to the mptadapter
 * Outputs:	frags - number of scatter gather elements
 *		blp - point to the buflist pointer
 *		sglbuf_dma - pointer to the (dma) sgl
 * Returns:	Null if failes
 *		pointer to the (virtual) sgl if successful.
 */
static MptSge_t *
kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
		 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc)
{
	MptSge_t	*sglbuf = NULL;		/* pointer to array of SGE */
						/* and chain buffers */
	struct buflist	*buflist = NULL;	/* kernel routine */
	MptSge_t	*sgl;
	int		 numfrags = 0;
	int		 fragcnt = 0;
	int		 alloc_sz = min(bytes,MAX_KMALLOC_SZ);	// avoid kernel warning msg!
	int		 bytes_allocd = 0;
	int		 this_alloc;
	dma_addr_t	 pa;					// phys addr
	int		 i, buflist_ent;
	int		 sg_spill = MAX_FRAGS_SPILL1;
	int		 dir;
	/* initialization */
	*frags = 0;
	*blp = NULL;

	/* Allocate and initialize an array of kernel
	 * structures for the SG elements.
	 */
	i = MAX_SGL_BYTES / 8;
	buflist = kzalloc(i, GFP_USER);
	if (!buflist)
		return NULL;
	buflist_ent = 0;

	/* Allocate a single block of memory to store the sg elements and
	 * the chain buffers.  The calling routine is responsible for
	 * copying the data in this array into the correct place in the
	 * request and chain buffers.
	 */
	sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
	if (sglbuf == NULL)
		goto free_and_fail;

	if (sgdir & 0x04000000)
		dir = PCI_DMA_TODEVICE;
	else
		dir = PCI_DMA_FROMDEVICE;

	/* At start:
	 *	sgl = sglbuf = point to beginning of sg buffer
	 *	buflist_ent = 0 = first kernel structure
	 *	sg_spill = number of SGE that can be written before the first
	 *		chain element.
	 *
	 */
	sgl = sglbuf;
	sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
	while (bytes_allocd < bytes) {
		this_alloc = min(alloc_sz, bytes-bytes_allocd);
		buflist[buflist_ent].len = this_alloc;
		buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
								 this_alloc,
								 &pa);
		if (buflist[buflist_ent].kptr == NULL) {
			/* Halve the hunk size and retry until it fits or
			 * we give up entirely.
			 */
			alloc_sz = alloc_sz / 2;
			if (alloc_sz == 0) {
				printk(MYIOC_s_WARN_FMT "-SG: No can do - "
				    "not enough memory! :-(\n", ioc->name);
				printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n",
				    ioc->name, numfrags);
				goto free_and_fail;
			}
			continue;
		} else {
			dma_addr_t dma_addr;

			bytes_allocd += this_alloc;
			sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
			dma_addr = pci_map_single(ioc->pcidev,
				buflist[buflist_ent].kptr, this_alloc, dir);
			sgl->Address = dma_addr;

			fragcnt++;
			numfrags++;
			sgl++;
			buflist_ent++;
		}

		if (bytes_allocd >= bytes)
			break;

		/* Need to chain? */
		if (fragcnt == sg_spill) {
			/* Chain SGEs are not supported here - bail out. */
			printk(MYIOC_s_WARN_FMT "-SG: No can do - "
			    "Chain required! :-(\n", ioc->name);
			printk(MYIOC_s_WARN_FMT "(freeing %d frags)\n",
			    ioc->name, numfrags);
			goto free_and_fail;
		}

		/* overflow check... */
		if (numfrags*8 > MAX_SGL_BYTES){
			/* GRRRRR... */
			printk(MYIOC_s_WARN_FMT "-SG: No can do - "
			    "too many SG frags! 
:-(\n", ioc->name);
			printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n",
			    ioc->name, numfrags);
			goto free_and_fail;
		}
	}

	/* Last sge fixup: set LE+eol+eob bits */
	sgl[-1].FlagsLength |= 0xC1000000;

	*frags = numfrags;
	*blp = buflist;

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - "
	    "%d SG frags generated!\n", ioc->name, numfrags));

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - "
	    "last (big) alloc_sz=%d\n", ioc->name, alloc_sz));

	return sglbuf;

free_and_fail:
	/* Undo every allocation made so far, skipping chain entries
	 * (FlagsLength type nibble 0x30).
	 */
	if (sglbuf != NULL) {
		for (i = 0; i < numfrags; i++) {
			dma_addr_t dma_addr;
			u8 *kptr;
			int len;

			if ((sglbuf[i].FlagsLength >> 24) == 0x30)
				continue;

			dma_addr = sglbuf[i].Address;
			kptr = buflist[i].kptr;
			len = buflist[i].len;

			pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
		}
		pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
	}
	kfree(buflist);
	return NULL;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Routine to free the SGL elements.
 * Walks the SGL until the end-of-buffer bit, unmapping and freeing each
 * simple SGE's data hunk, then frees the SGL block and the buflist array.
 */
static void
kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc)
{
	MptSge_t	*sg = sgl;
	struct buflist	*bl = buflist;
	u32		 nib;
	int		 dir;
	int		 n = 0;

	if (sg->FlagsLength & 0x04000000)
		dir = PCI_DMA_TODEVICE;
	else
		dir = PCI_DMA_FROMDEVICE;

	nib = (sg->FlagsLength & 0xF0000000) >> 28;
	while (! (nib & 0x4)) {		 /* eob */
		/* skip ignore/chain. */
		if (nib == 0 || nib == 3) {
			;
		} else if (sg->Address) {
			dma_addr_t dma_addr;
			void *kptr;
			int len;

			dma_addr = sg->Address;
			kptr = bl->kptr;
			len = bl->len;
			pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
			pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
			n++;
		}
		sg++;
		bl++;
		nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28;
	}

	/* we're at eob! 
*/ if (sg->Address) { dma_addr_t dma_addr; void *kptr; int len; dma_addr = sg->Address; kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); n++; } pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma); kfree(buflist); dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n", ioc->name, n)); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptctl_getiocinfo - Query the host adapter for IOC information. * @arg: User space argument * * Outputs: None. * Return: 0 if successful * -EFAULT if data unavailable * -ENODEV if no such device/adapter */ static int mptctl_getiocinfo (unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; MPT_ADAPTER *ioc; struct pci_dev *pdev; int iocnum; unsigned int port; int cim_rev; u8 revision; struct scsi_device *sdev; VirtDevice *vdevice; /* Add of PCI INFO results in unaligned access for * IA64 and Sparc. Reset long to int. Return no PCI * data for obsolete format. 
*/ if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0)) cim_rev = 0; else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1)) cim_rev = 1; else if (data_size == sizeof(struct mpt_ioctl_iocinfo)) cim_rev = 2; else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12)) cim_rev = 0; /* obsolete */ else return -EFAULT; karg = kmalloc(data_size, GFP_KERNEL); if (karg == NULL) { printk(KERN_ERR MYNAM "%s::mpt_ioctl_iocinfo() @%d - no memory available!\n", __FILE__, __LINE__); return -ENOMEM; } if (copy_from_user(karg, uarg, data_size)) { printk(KERN_ERR MYNAM "%s@%d::mptctl_getiocinfo - " "Unable to read in mpt_ioctl_iocinfo struct @ %p\n", __FILE__, __LINE__, uarg); kfree(karg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); kfree(karg); return -ENODEV; } /* Verify the data transfer size is correct. */ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Structure size mismatch. 
Command not completed.\n", ioc->name, __FILE__, __LINE__); kfree(karg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n", ioc->name)); /* Fill in the data and return the structure to the calling * program */ if (ioc->bus_type == SAS) karg->adapterType = MPT_IOCTL_INTERFACE_SAS; else if (ioc->bus_type == FC) karg->adapterType = MPT_IOCTL_INTERFACE_FC; else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; if (karg->hdr.port > 1) return -EINVAL; port = karg->hdr.port; karg->port = port; pdev = (struct pci_dev *) ioc->pcidev; karg->pciId = pdev->device; pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); karg->hwRev = revision; karg->subSystemDevice = pdev->subsystem_device; karg->subSystemVendor = pdev->subsystem_vendor; if (cim_rev == 1) { /* Get the PCI bus, device, and function numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); } else if (cim_rev == 2) { /* Get the PCI bus, device, function and segment ID numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); } /* Get number of devices */ karg->numDevices = 0; if (ioc->sh) { shost_for_each_device(sdev, ioc->sh) { vdevice = sdev->hostdata; if (vdevice == NULL || vdevice->vtarget == NULL) continue; if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; karg->numDevices++; } } /* Set the BIOS and FW Version */ karg->FWVersion = ioc->facts.FWVersion.Word; karg->BIOSVersion = ioc->biosVersion; /* Set the Version Strings. 
*/ strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0'; karg->busChangeEvent = 0; karg->hostId = ioc->pfacts[port].PortSCSIID; karg->rsvd[0] = karg->rsvd[1] = 0; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, karg, data_size)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); kfree(karg); return -EFAULT; } kfree(karg); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptctl_gettargetinfo - Query the host adapter for target information. * @arg: User space argument * * Outputs: None. * Return: 0 if successful * -EFAULT if data unavailable * -ENODEV if no such device/adapter */ static int mptctl_gettargetinfo (unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; int iocnum; int numDevices = 0; int lun; int maxWordsLeft; int numBytes; u8 port; struct scsi_device *sdev; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - " "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes * in the returned structure. * Ignore the port setting. 
	 */
	numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
	maxWordsLeft = numBytes/sizeof(int);
	port = karg.hdr.port;

	if (maxWordsLeft <= 0) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
		    ioc->name, __FILE__, __LINE__);
		return -ENOMEM;
	}

	/* Fill in the data and return the structure to the calling
	 * program
	 */

	/* struct mpt_ioctl_targetinfo does not contain sufficient space
	 * for the target structures so when the IOCTL is called, there is
	 * not sufficient stack space for the structure. Allocate memory,
	 * populate the memory, copy back to the user, then free memory.
	 * targetInfo format:
	 * bits 31-24: reserved
	 *      23-16: LUN
	 *      15- 8: Bus Number
	 *       7- 0: Target ID
	 */
	pmem = kzalloc(numBytes, GFP_KERNEL);
	if (!pmem) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
		    ioc->name, __FILE__, __LINE__);
		return -ENOMEM;
	}
	pdata = (int *) pmem;

	/* Get number of devices
	 */
	if (ioc->sh){
		shost_for_each_device(sdev, ioc->sh) {
			/* Stop recording once the user buffer is full, but
			 * keep iterating so the shost iterator unwinds.
			 */
			if (!maxWordsLeft)
				continue;
			vdevice = sdev->hostdata;
			if (vdevice == NULL || vdevice->vtarget == NULL)
				continue;
			/* RAID components are not reported to user space. */
			if (vdevice->vtarget->tflags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT)
				continue;
			/* RAID volumes report a fixed pseudo-LUN of 0x80. */
			lun = (vdevice->vtarget->raidVolume) ? 
0x80 : vdevice->lun;
			*pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) +
			    (vdevice->vtarget->id ));
			pdata++;
			numDevices++;
			--maxWordsLeft;
		}
	}
	karg.numDevices = numDevices;

	/* Copy part of the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg,
				sizeof(struct mpt_ioctl_targetinfo))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		kfree(pmem);
		return -EFAULT;
	}

	/* Copy the remaining data from kernel memory to user memory
	 */
	if (copy_to_user(uarg->targetInfo, pmem, numBytes)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
			ioc->name, __FILE__, __LINE__, pdata);
		kfree(pmem);
		return -EFAULT;
	}

	kfree(pmem);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT IOCTL Test function.
 *
 * Reports the chip type (or message-frame count with MFCNT), the adapter
 * name and the product string back to user space.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-ENODEV  if no such device/adapter
 */
static int
mptctl_readtest (unsigned long arg)
{
	struct mpt_ioctl_test __user *uarg = (void __user *) arg;
	struct mpt_ioctl_test	 karg;
	MPT_ADAPTER *ioc;
	int iocnum;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
			"Unable to read in mpt_ioctl_test struct @ %p\n",
			__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
	    ioc->name));
	/* Fill in the data and return the structure to the calling
	 * program
	 */

#ifdef MFCNT
	karg.chip_type = ioc->mfcnt;
#else
	karg.chip_type = ioc->pcidev->device;
#endif
	strncpy (karg.name, ioc->name, MPT_MAX_NAME);
	karg.name[MPT_MAX_NAME-1]='\0';
	strncpy (karg.product, 
ioc->prod_name, MPT_PRODUCT_LENGTH);
	karg.product[MPT_PRODUCT_LENGTH-1]='\0';

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_readtest - "
			"Unable to write out mpt_ioctl_test struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptctl_eventquery - Query the host adapter for the event types
 *	that are being logged.
 *	@arg: User space argument
 *
 *	Returns the event-log capacity and the currently enabled event-type
 *	mask for the adapter.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-ENODEV  if no such device/adapter
 */
static int
mptctl_eventquery (unsigned long arg)
{
	struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
	struct mpt_ioctl_eventquery	 karg;
	MPT_ADAPTER *ioc;
	int iocnum;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
			"Unable to read in mpt_ioctl_eventquery struct @ %p\n",
			__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
	    ioc->name));
	karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
	karg.eventTypes = ioc->eventTypes;

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - "
			"Unable to write out mpt_ioctl_eventquery struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPTEVENTENABLE handler: sets the adapter's event-type mask and lazily
 * allocates the in-kernel event log on first use.
 */
static int
mptctl_eventenable (unsigned long arg)
{
	struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
	struct 
mpt_ioctl_eventenable	 karg;
	MPT_ADAPTER *ioc;
	int iocnum;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
			"Unable to read in mpt_ioctl_eventenable struct @ %p\n",
			__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
	    ioc->name));
	if (ioc->events == NULL) {
		/* Have not yet allocated memory - do so now.
		 */
		int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
		ioc->events = kzalloc(sz, GFP_KERNEL);
		if (!ioc->events) {
			printk(MYIOC_s_ERR_FMT
			    ": ERROR - Insufficient memory to add adapter!\n",
			    ioc->name);
			return -ENOMEM;
		}
		ioc->alloc_total += sz;

		ioc->eventContext = 0;
	}

	/* Update the IOC event logging flag.
	 */
	ioc->eventTypes = karg.eventTypes;

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPTEVENTREPORT handler: copies as many logged events as fit in the
 * caller's buffer back to user space and re-arms SIGIO notification.
 */
static int
mptctl_eventreport (unsigned long arg)
{
	struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
	struct mpt_ioctl_eventreport	 karg;
	MPT_ADAPTER		 *ioc;
	int			 iocnum;
	int			 numBytes, maxEvents, max;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - "
			"Unable to read in mpt_ioctl_eventreport struct @ %p\n",
			__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
	    ioc->name));

	numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
	maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);

	/* Clamp to the size of the kernel-side event log. */
	max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? 
MPTCTL_EVENT_LOG_SIZE : maxEvents;

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if ((max < 1) || !ioc->events)
		return -ENODATA;

	/* reset this flag so SIGIO can restart */
	ioc->aen_event_read_flag=0;

	/* Copy the data from kernel memory to user memory
	 */
	numBytes = max * sizeof(MPT_IOCTL_EVENTS);
	if (copy_to_user(uarg->eventData, ioc->events, numBytes)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - "
			"Unable to write out mpt_ioctl_eventreport struct @ %p\n",
			ioc->name, __FILE__, __LINE__, ioc->events);
		return -EFAULT;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPTFWREPLACE handler: replaces the cached firmware image with one
 * supplied from user space (only if an image is currently cached).
 * The allocation size is rounded up to a 4-byte multiple.
 */
static int
mptctl_replace_fw (unsigned long arg)
{
	struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
	struct mpt_ioctl_replace_fw	 karg;
	MPT_ADAPTER		 *ioc;
	int			 iocnum;
	int			 newFwSize;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_replace_fw - "
			"Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
			__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
	    ioc->name));
	/* If caching FW, Free the old FW image
	 */
	if (ioc->cached_fw == NULL)
		return 0;

	mpt_free_fw_memory(ioc);

	/* Allocate memory for the new FW image
	 */
	newFwSize = karg.newImageSize;

	/* Round the image size up to the next multiple of 4 bytes. */
	if (newFwSize & 0x01)
		newFwSize += 1;
	if (newFwSize & 0x02)
		newFwSize += 2;

	mpt_alloc_fw_memory(ioc, newFwSize);
	if (ioc->cached_fw == NULL)
		return -ENOMEM;

	/* Copy the data from user memory to kernel space
	 */
	if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - "
				"Unable to read in mpt_ioctl_replace_fw image "
				"@ %p\n", ioc->name, __FILE__, __LINE__, uarg);
		/* Copy failed: drop the half-written cached image. */
		mpt_free_fw_memory(ioc);
		return -EFAULT;
	}

	/* Update IOCFactsReply
	 */
	ioc->facts.FWImageSize = newFwSize;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT IOCTL MPTCOMMAND function.
 * Cast the arg into the mpt_ioctl_mpt_command structure.
 *
 * Validates the header/adapter and delegates the real work to
 * mptctl_do_mpt_command().
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-EFAULT if data unavailable
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 */
static int
mptctl_mpt_command (unsigned long arg)
{
	struct mpt_ioctl_command __user *uarg = (void __user *) arg;
	struct mpt_ioctl_command  karg;
	MPT_ADAPTER	*ioc;
	int		iocnum;
	int		rc;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - "
			"Unable to read in mpt_ioctl_command struct @ %p\n",
			__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	rc = mptctl_do_mpt_command (karg, &uarg->MF);

	return rc;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-EFAULT if data unavailable
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 *		-EPERM if SCSI I/O and target is untagged
 */
static int
mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
{
	MPT_ADAPTER	*ioc;
	MPT_FRAME_HDR	*mf = NULL;
	MPIHeader_t	*hdr;
	char		*psge;
	struct buflist	bufIn;	/* data In buffer */
	struct buflist	bufOut; /* data Out buffer */
	dma_addr_t	dma_addr_in;
	dma_addr_t	dma_addr_out;
	int		sgSize = 0;	/* Num SG elements */
	int		iocnum, flagsLength;
	int		sz, rc = 0;
	int		msgContext;
	u16		req_idx;
	ulong 		timeout;
	unsigned long	timeleft;
	struct scsi_device *sdev;
	unsigned long	 flags;
	u8		 function;

	/* bufIn and bufOut are used for user to kernel space transfers
	 */
	bufIn.kptr = bufOut.kptr = NULL;
	bufIn.len = bufOut.len = 0;

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	/* Refuse new pass-through commands while a diagnostic reset is
	 * running; the lock only guards the flag read, not the command. */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
			"Busy with diagnostic reset\n", __FILE__, __LINE__);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

	/* Verify that the final request frame will not be too large.
	 * dataSgeOffset is user-supplied (units of 32-bit words); the frame
	 * plus up to two SGEs must fit in one request frame.
	 */
	sz = karg.dataSgeOffset * 4;
	if (karg.dataInSize > 0)
		sz += ioc->SGE_size;
	if (karg.dataOutSize > 0)
		sz += ioc->SGE_size;

	if (sz > ioc->req_sz) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
			"Request frame too large (%d) maximum (%d)\n",
			ioc->name, __FILE__, __LINE__, sz, ioc->req_sz);
		return -EFAULT;
	}

	/* Get a free request frame and save the message context.
	 */
        if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL)
                return -EAGAIN;

	hdr = (MPIHeader_t *) mf;
	msgContext = le32_to_cpu(hdr->MsgContext);
	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);

	/* Copy the request frame
	 * Reset the saved message context.
	 * Request frame in user space
	 *
	 * NOTE(review): the frame header is fetched from user space here,
	 * after the size validation above used a separately-fetched copy
	 * (karg) — classic double-fetch surface; TODO confirm against
	 * upstream hardening of this path.
	 */
	if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
			"Unable to read MF from mpt_ioctl_command struct @ %p\n",
			ioc->name, __FILE__, __LINE__, mfPtr);
		function = -1;
		rc = -EFAULT;
		goto done_free_mem;
	}
	hdr->MsgContext = cpu_to_le32(msgContext);
	function = hdr->Function;


	/* Verify that this request is allowed.
	 */
	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
	    ioc->name, hdr->Function, mf));

	switch (function) {
	case MPI_FUNCTION_IOC_FACTS:
	case MPI_FUNCTION_PORT_FACTS:
		karg.dataOutSize  = karg.dataInSize = 0;
		break;

	case MPI_FUNCTION_CONFIG:
	{
		Config_t *config_frame;
		config_frame = (Config_t *)mf;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x "
		    "number=0x%02x action=0x%02x\n", ioc->name,
		    config_frame->Header.PageType,
		    config_frame->ExtPageType,
		    config_frame->Header.PageNumber,
		    config_frame->Action));
		break;
	}

	case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND:
	case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND:
	case MPI_FUNCTION_FW_UPLOAD:
	case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
	case MPI_FUNCTION_FW_DOWNLOAD:
	case MPI_FUNCTION_FC_PRIMITIVE_SEND:
	case MPI_FUNCTION_TOOLBOX:
	case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
		/* passed through as-is; data SGEs appended below */
		break;

	case MPI_FUNCTION_SCSI_IO_REQUEST:
		if (ioc->sh) {
			SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
			int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
			int scsidir = 0;
			int dataSize;
			u32 id;

			/* bound the user-supplied target id/bus */
			id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus;
			if (pScsiReq->TargetID > id) {
				printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
					"Target ID out of bounds. \n",
					ioc->name, __FILE__, __LINE__);
				rc = -ENODEV;
				goto done_free_mem;
			}

			if (pScsiReq->Bus >= ioc->number_of_buses) {
				printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
					"Target Bus out of bounds. \n",
					ioc->name, __FILE__, __LINE__);
				rc = -ENODEV;
				goto done_free_mem;
			}

			pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
			pScsiReq->MsgFlags |= mpt_msg_flags(ioc);


			/* verify that app has not requested
			 *	more sense data than driver
			 *	can provide, if so, reset this parameter
			 * set the sense buffer pointer low address
			 * update the control field to specify Q type
			 */
			if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
				pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
			else
				pScsiReq->SenseBufferLength = karg.maxSenseBytes;

			pScsiReq->SenseBufferLowAddr =
				cpu_to_le32(ioc->sense_buf_low_dma
				   + (req_idx * MPT_SENSE_BUFFER_ALLOC));

			/* use simple queueing only if the target negotiated it */
			shost_for_each_device(sdev, ioc->sh) {
				struct scsi_target *starget = scsi_target(sdev);
				VirtTarget *vtarget = starget->hostdata;

				if (vtarget == NULL)
					continue;

				if ((pScsiReq->TargetID == vtarget->id) &&
				    (pScsiReq->Bus == vtarget->channel) &&
				    (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
					qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
			}

			/* Have the IOCTL driver set the direction based
			 * on the dataOutSize (ordering issue with Sparc).
			 */
			if (karg.dataOutSize > 0) {
				scsidir = MPI_SCSIIO_CONTROL_WRITE;
				dataSize = karg.dataOutSize;
			} else {
				scsidir = MPI_SCSIIO_CONTROL_READ;
				dataSize = karg.dataInSize;
			}

			pScsiReq->Control = cpu_to_le32(scsidir | qtag);
			pScsiReq->DataLength = cpu_to_le32(dataSize);


		} else {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"SCSI driver is not loaded. \n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
		break;

	case MPI_FUNCTION_SMP_PASSTHROUGH:
		/* Check mf->PassthruFlags to determine if
		 * transfer is ImmediateMode or not.
		 * Immediate mode returns data in the ReplyFrame.
		 * Else, we are sending request and response data
		 * in two SGLs at the end of the mf.
		 */
		break;

	case MPI_FUNCTION_SATA_PASSTHROUGH:
		if (!ioc->sh) {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"SCSI driver is not loaded. \n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
		break;

	case MPI_FUNCTION_RAID_ACTION:
		/* Just add a SGE
		 */
		break;

	case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
		if (ioc->sh) {
			SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
			int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
			int scsidir = MPI_SCSIIO_CONTROL_READ;
			int dataSize;

			pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
			pScsiReq->MsgFlags |= mpt_msg_flags(ioc);


			/* verify that app has not requested
			 *	more sense data than driver
			 *	can provide, if so, reset this parameter
			 * set the sense buffer pointer low address
			 * update the control field to specify Q type
			 */
			if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
				pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
			else
				pScsiReq->SenseBufferLength = karg.maxSenseBytes;

			pScsiReq->SenseBufferLowAddr =
				cpu_to_le32(ioc->sense_buf_low_dma
				   + (req_idx * MPT_SENSE_BUFFER_ALLOC));

			/* All commands to physical devices are tagged
			 */

			/* Have the IOCTL driver set the direction based
			 * on the dataOutSize (ordering issue with Sparc).
			 */
			if (karg.dataOutSize > 0) {
				scsidir = MPI_SCSIIO_CONTROL_WRITE;
				dataSize = karg.dataOutSize;
			} else {
				scsidir = MPI_SCSIIO_CONTROL_READ;
				dataSize = karg.dataInSize;
			}

			pScsiReq->Control = cpu_to_le32(scsidir | qtag);
			pScsiReq->DataLength = cpu_to_le32(dataSize);

		} else {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"SCSI driver is not loaded. \n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
		break;

	case MPI_FUNCTION_SCSI_TASK_MGMT:
	{
		SCSITaskMgmt_t	*pScsiTm;
		pScsiTm = (SCSITaskMgmt_t *)mf;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"\tTaskType=0x%x MsgFlags=0x%x "
			"TaskMsgContext=0x%x id=%d channel=%d\n",
			ioc->name, pScsiTm->TaskType, le32_to_cpu
			(pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
			pScsiTm->TargetID, pScsiTm->Bus));
		break;
	}

	case MPI_FUNCTION_IOC_INIT:
	{
		IOCInit_t	*pInit = (IOCInit_t *) mf;
		u32		high_addr, sense_high;

		/* Verify that all entries in the IOC INIT match
		 * existing setup (and in LE format).
		 */
		if (sizeof(dma_addr_t) == sizeof(u64)) {
			high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32));
			sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
		} else {
			high_addr = 0;
			sense_high= 0;
		}

		if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) ||
			(pInit->MaxBuses != ioc->facts.MaxBuses) ||
			(pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) ||
			(pInit->HostMfaHighAddr != high_addr) ||
			(pInit->SenseBufferHighAddr != sense_high)) {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
	}
		break;

	default:
		/*
		 * MPI_FUNCTION_PORT_ENABLE
		 * MPI_FUNCTION_TARGET_CMD_BUFFER_POST
		 * MPI_FUNCTION_TARGET_ASSIST
		 * MPI_FUNCTION_TARGET_STATUS_SEND
		 * MPI_FUNCTION_TARGET_MODE_ABORT
		 * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
		 * MPI_FUNCTION_IO_UNIT_RESET
		 * MPI_FUNCTION_HANDSHAKE
		 * MPI_FUNCTION_REPLY_FRAME_REMOVAL
		 * MPI_FUNCTION_EVENT_NOTIFICATION
		 *  (driver handles event notification)
		 * MPI_FUNCTION_EVENT_ACK
		 */

		/*  What to do with these???  CHECK ME!!!
			MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
			MPI_FUNCTION_FC_LINK_SRVC_RSP
			MPI_FUNCTION_FC_ABORT
			MPI_FUNCTION_LAN_SEND
			MPI_FUNCTION_LAN_RECEIVE
			MPI_FUNCTION_LAN_RESET
		*/

		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
			"Illegal request (function 0x%x) \n",
			ioc->name, __FILE__, __LINE__, hdr->Function);
		rc = -EFAULT;
		goto done_free_mem;
	}

	/* Add the SGL ( at most one data in SGE and one data out SGE )
	 * In the case of two SGE's - the data out (write) will always
	 * preceede the data in (read) SGE. psgList is used to free the
	 * allocated memory.
	 */
	psge = (char *) (((int *) mf) + karg.dataSgeOffset);
	flagsLength = 0;

	if (karg.dataOutSize > 0)
		sgSize ++;

	if (karg.dataInSize > 0)
		sgSize ++;

	if (sgSize > 0) {

		/* Set up the dataOut memory allocation */
		if (karg.dataOutSize > 0) {
			if (karg.dataInSize > 0) {
				flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
						MPI_SGE_FLAGS_END_OF_BUFFER |
						MPI_SGE_FLAGS_DIRECTION)
						<< MPI_SGE_FLAGS_SHIFT;
			} else {
				flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
			}
			flagsLength |= karg.dataOutSize;
			bufOut.len = karg.dataOutSize;
			bufOut.kptr = pci_alloc_consistent(
					ioc->pcidev, bufOut.len, &dma_addr_out);

			if (bufOut.kptr == NULL) {
				rc = -ENOMEM;
				goto done_free_mem;
			} else {
				/* Set up this SGE.
				 * Copy to MF and to sglbuf
				 */
				ioc->add_sge(psge, flagsLength, dma_addr_out);
				psge += ioc->SGE_size;

				/* Copy user data to kernel space.
				 */
				if (copy_from_user(bufOut.kptr,
						karg.dataOutBufPtr,
						bufOut.len)) {
					printk(MYIOC_s_ERR_FMT
						"%s@%d::mptctl_do_mpt_command - Unable "
						"to read user data "
						"struct @ %p\n",
						ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr);
					rc = -EFAULT;
					goto done_free_mem;
				}
			}
		}

		if (karg.dataInSize > 0) {
			flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
			flagsLength |= karg.dataInSize;

			bufIn.len = karg.dataInSize;
			bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
					bufIn.len, &dma_addr_in);

			if (bufIn.kptr == NULL) {
				rc = -ENOMEM;
				goto done_free_mem;
			} else {
				/* Set up this SGE
				 * Copy to MF and to sglbuf
				 */
				ioc->add_sge(psge, flagsLength, dma_addr_in);
			}
		}
	} else  {
		/* Add a NULL SGE
		 */
		ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
	}

	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
	if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {

		/* Task management is serialized and, on older firmware,
		 * must go down via the doorbell handshake instead of the
		 * high-priority queue. */
		mutex_lock(&ioc->taskmgmt_cmds.mutex);
		if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
			mutex_unlock(&ioc->taskmgmt_cmds.mutex);
			goto done_free_mem;
		}

		DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);

		if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
		    (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
			mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf);
		else {
			rc =mpt_send_handshake_request(mptctl_id, ioc,
				sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
			if (rc != 0) {
				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
				    "send_handshake FAILED! (ioc %p, mf %p)\n",
				    ioc->name, ioc, mf));
				mpt_clear_taskmgmt_in_progress_flag(ioc);
				rc = -ENODATA;
				mutex_unlock(&ioc->taskmgmt_cmds.mutex);
				goto done_free_mem;
			}
		}

	} else
		mpt_put_msg_frame(mptctl_id, ioc, mf);

	/* Now wait for the command to complete */
	timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
retry_wait:
	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
				HZ*timeout);
	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		rc = -ETIME;
		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
		    ioc->name, __func__));
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			/* IOC reset already reclaimed the frame */
			if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
				mutex_unlock(&ioc->taskmgmt_cmds.mutex);
			goto done_free_mem;
		}
		if (!timeleft) {
			/* true timeout: hand the frame to the timeout path,
			 * which owns it from here on (hence mf = NULL) */
			if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
				mutex_unlock(&ioc->taskmgmt_cmds.mutex);
			mptctl_timeout_expired(ioc, mf);
			mf = NULL;
		} else
			/* spurious wakeup: keep waiting */
			goto retry_wait;
		goto done_free_mem;
	}

	if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
		mutex_unlock(&ioc->taskmgmt_cmds.mutex);


	/* the firmware consumed the frame; nothing left to free */
	mf = NULL;

	/* If a valid reply frame, copy to the user.
	 * Offset 2: reply length in U32's
	 */
	if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
		if (karg.maxReplyBytes < ioc->reply_sz) {
			sz = min(karg.maxReplyBytes,
				4*ioc->ioctl_cmds.reply[2]);
		} else {
			 sz = min(ioc->reply_sz,
				4*ioc->ioctl_cmds.reply[2]);
		}
		if (sz > 0) {
			if (copy_to_user(karg.replyFrameBufPtr,
				 ioc->ioctl_cmds.reply, sz)){
				 printk(MYIOC_s_ERR_FMT
				     "%s@%d::mptctl_do_mpt_command - "
				 "Unable to write out reply frame %p\n",
				 ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr);
				 rc =  -ENODATA;
				 goto done_free_mem;
			}
		}
	}

	/* If valid sense data, copy to user.
	 */
	if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
		sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
		if (sz > 0) {
			if (copy_to_user(karg.senseDataPtr,
				ioc->ioctl_cmds.sense, sz)) {
				printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"Unable to write sense data to user %p\n",
				ioc->name, __FILE__, __LINE__,
				karg.senseDataPtr);
				rc = -ENODATA;
				goto done_free_mem;
			}
		}
	}

	/* If the overall status is _GOOD and data in, copy data
	 * to user.
	 */
	if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
				(karg.dataInSize > 0) && (bufIn.kptr)) {

		if (copy_to_user(karg.dataInBufPtr,
				 bufIn.kptr, karg.dataInSize)) {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"Unable to write data to user %p\n",
				ioc->name, __FILE__, __LINE__,
				karg.dataInBufPtr);
			rc = -ENODATA;
		}
	}

done_free_mem:

	CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);

	/* Free the allocated memory.
	 */
	if (bufOut.kptr != NULL) {
		pci_free_consistent(ioc->pcidev,
			bufOut.len, (void *) bufOut.kptr, dma_addr_out);
	}

	if (bufIn.kptr != NULL) {
		pci_free_consistent(ioc->pcidev,
			bufIn.len, (void *) bufIn.kptr, dma_addr_in);
	}

	/* mf is null if command issued successfully
	 * otherwise, failure occured after mf acquired.
	 */
	if (mf)
		mpt_free_msg_frame(ioc, mf);

	return rc;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the HOST INFO command.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 */
static int
mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
{
	hp_host_info_t	__user *uarg = (void __user *) arg;
	MPT_ADAPTER		*ioc;
	struct pci_dev		*pdev;
	char                    *pbuf=NULL;
	dma_addr_t		buf_dma;
	hp_host_info_t		karg;
	CONFIGPARMS		cfg;
	ConfigPageHeader_t	hdr;
	int			iocnum;
	int			rc, cim_rev;
	ToolboxIstwiReadWriteRequest_t	*IstwiRWRequest;
	MPT_FRAME_HDR		*mf = NULL;
	MPIHeader_t		*mpi_hdr;
	unsigned long		timeleft;
	int			retval;

	/* Reset long to int.
 Should affect IA64 and SPARC only
	 */
	if (data_size == sizeof(hp_host_info_t))
		cim_rev = 1;
	else if (data_size == sizeof(hp_host_info_rev0_t))
		cim_rev = 0;	/* obsolete */
	else
		return -EFAULT;

	if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - "
			"Unable to read in hp_host_info struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}
	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
	    ioc->name));

	/* Fill in the data and return the structure to the calling
	 * program
	 */
	pdev = (struct pci_dev *) ioc->pcidev;

	karg.vendor = pdev->vendor;
	karg.device = pdev->device;
	karg.subsystem_id = pdev->subsystem_device;
	karg.subsystem_vendor = pdev->subsystem_vendor;
	karg.devfn = pdev->devfn;
	karg.bus = pdev->bus->number;

	/* Save the SCSI host no. if
	 * SCSI driver loaded
	 */
	if (ioc->sh != NULL)
		karg.host_no = ioc->sh->host_no;
	else
		karg.host_no =  -1;

	/* Reformat the fw_version into a string:
	 * "MM.mm.uu.dd" with each two-digit field zero-suppressed to
	 * one leading '0' when the component is < 10.
	 */
	karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
		((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
	karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
	karg.fw_version[2] = '.';
	karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
		((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
	karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
	karg.fw_version[5] = '.';
	karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
		((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
	karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
	karg.fw_version[8] = '.';
	karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
		((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
	karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
	karg.fw_version[11] = '\0';

	/* Issue a config request to get the device serial number
	 * (Manufacturing page 0: header read first, then the page itself).
	 */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 0;
	hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.pageAddr = 0;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;	/* read */
	cfg.timeout = 10;

	strncpy(karg.serial_number, " ", 24);
	if (mpt_config(ioc, &cfg) == 0) {
		if (cfg.cfghdr.hdr->PageLength > 0) {
			/* Issue the second config page request */
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

			pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
			if (pbuf) {
				cfg.physAddr = buf_dma;
				if (mpt_config(ioc, &cfg) == 0) {
					ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
					if (strlen(pdata->BoardTracerNumber) > 1) {
						strncpy(karg.serial_number,
							pdata->BoardTracerNumber, 24);
						karg.serial_number[24-1]='\0';
					}
				}
				pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
				pbuf = NULL;
			}
		}
	}
	rc = mpt_GetIocState(ioc, 1);
	switch (rc) {
	case MPI_IOC_STATE_OPERATIONAL:
		karg.ioc_status =  HP_STATUS_OK;
		break;

	case MPI_IOC_STATE_FAULT:
		karg.ioc_status =  HP_STATUS_FAILED;
		break;

	case MPI_IOC_STATE_RESET:
	case MPI_IOC_STATE_READY:
	default:
		karg.ioc_status =  HP_STATUS_OTHER;
		break;
	}

	karg.base_io_addr = pci_resource_start(pdev, 0);

	if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
		karg.bus_phys_width = HP_BUS_WIDTH_UNK;
	else
		karg.bus_phys_width = HP_BUS_WIDTH_16;

	karg.hard_resets = 0;
	karg.soft_resets = 0;
	karg.timeouts = 0;
	/* reset/timeout counters are only defined for the rev-1 layout */
	if (ioc->sh != NULL) {
		MPT_SCSI_HOST *hd =  shost_priv(ioc->sh);

		if (hd && (cim_rev == 1)) {
			karg.hard_resets = ioc->hard_resets;
			karg.soft_resets = ioc->soft_resets;
			karg.timeouts = ioc->timeouts;
		}
	}

	/* 
	 * Gather ISTWI(Industry Standard Two Wire Interface) Data
	 */
	if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
			"%s, no msg frames!!\n", ioc->name, __func__));
		goto out;
	}

	IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
	mpi_hdr = (MPIHeader_t *) mf;
	memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
	IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
	IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
	IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
	IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
	IstwiRWRequest->NumAddressBytes = 0x01;
	IstwiRWRequest->DataLength = cpu_to_le16(0x04);
	/* ISTWI device address differs by PCI function number;
	 * 0xB0/0xB2 — presumably board-specific I2C addresses, TODO confirm */
	if (pdev->devfn & 1)
		IstwiRWRequest->DeviceAddr = 0xB2;
	else
		IstwiRWRequest->DeviceAddr = 0xB0;

	pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
	if (!pbuf)
		goto out;
	ioc->add_sge((char *)&IstwiRWRequest->SGL,
	    (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);

	retval = 0;
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
				IstwiRWRequest->MsgContext);
	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
	mpt_put_msg_frame(mptctl_id, ioc, mf);

retry_wait:
	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
			HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		retval = -ETIME;
		printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			mpt_free_msg_frame(ioc, mf);
			goto out;
		}
		if (!timeleft)
			mptctl_timeout_expired(ioc, mf);
		else
			goto retry_wait;
		goto out;
	}

	/*
	 *ISTWI Data Definition
	 * pbuf[0] = FW_VERSION = 0x4
	 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on
	 *  the config, you should be seeing one out of these three values
	 * pbuf[2] = Drive Installed Map = bit pattern depend on which
	 *  bays have drives in them
	 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
	 */
	if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
		karg.rsvd = *(u32 *)pbuf;

 out:
	CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);

	if (pbuf)
		pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - "
			"Unable to write out hp_host_info @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	return 0;

}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the TARGET INFO command.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 */
static int
mptctl_hp_targetinfo(unsigned long arg)
{
	hp_target_info_t __user *uarg = (void __user *) arg;
	SCSIDevicePage0_t	*pg0_alloc;
	SCSIDevicePage3_t	*pg3_alloc;
	MPT_ADAPTER		*ioc;
	MPT_SCSI_HOST		*hd = NULL;
	hp_target_info_t	karg;
	int			iocnum;
	int			data_sz;
	dma_addr_t		page_dma;
	CONFIGPARMS	 	cfg;
	ConfigPageHeader_t	hdr;
	int			tmp, np, rc = 0;

	if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_targetinfo - "
			"Unable to read in hp_host_targetinfo struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
		(ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}
	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
	    ioc->name));

	/* There is nothing to do for FCP parts.
*/ if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) return 0; if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) return 0; if (ioc->sh->host_no != karg.hdr.host) return -ENODEV; /* Get the data transfer speeds */ data_sz = ioc->spi_data.sdp0length * 4; pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); if (pg0_alloc) { hdr.PageVersion = ioc->spi_data.sdp0version; hdr.PageLength = data_sz; hdr.PageNumber = 0; hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; cfg.cfghdr.hdr = &hdr; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; cfg.dir = 0; cfg.timeout = 0; cfg.physAddr = page_dma; cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id; if ((rc = mpt_config(ioc, &cfg)) == 0) { np = le32_to_cpu(pg0_alloc->NegotiatedParameters); karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ? HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8; if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) { tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8; if (tmp < 0x09) karg.negotiated_speed = HP_DEV_SPEED_ULTRA320; else if (tmp <= 0x09) karg.negotiated_speed = HP_DEV_SPEED_ULTRA160; else if (tmp <= 0x0A) karg.negotiated_speed = HP_DEV_SPEED_ULTRA2; else if (tmp <= 0x0C) karg.negotiated_speed = HP_DEV_SPEED_ULTRA; else if (tmp <= 0x25) karg.negotiated_speed = HP_DEV_SPEED_FAST; else karg.negotiated_speed = HP_DEV_SPEED_ASYNC; } else karg.negotiated_speed = HP_DEV_SPEED_ASYNC; } pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma); } /* Set defaults */ karg.message_rejects = -1; karg.phase_errors = -1; karg.parity_errors = -1; karg.select_timeouts = -1; /* Get the target error parameters */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 3; hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; cfg.cfghdr.hdr = &hdr; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; cfg.timeout = 0; cfg.physAddr = -1; if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) { /* Issue the second config page request */ 
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; data_sz = (int) cfg.cfghdr.hdr->PageLength * 4; pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent( ioc->pcidev, data_sz, &page_dma); if (pg3_alloc) { cfg.physAddr = page_dma; cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id; if ((rc = mpt_config(ioc, &cfg)) == 0) { karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount); karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount); karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount); } pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma); } } hd = shost_priv(ioc->sh); if (hd != NULL) karg.select_timeouts = hd->sel_timeout[karg.hdr.id]; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hp_target_info - " "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); return -EFAULT; } return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static const struct file_operations mptctl_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_mpctl_ioctl, #endif }; static struct miscdevice mptctl_miscdev = { MPT_MINOR, MYNAM, &mptctl_fops }; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #ifdef CONFIG_COMPAT static int compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_fw_xfer32 kfw32; struct mpt_fw_xfer kfw; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = kfw32.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM 
			"::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n",
				__LINE__, iocnumX);
		return -ENODEV;
	}

	/* serializes against other ioctl users on this adapter */
	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
		return ret;

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n",
	    iocp->name));
	/* widen the 32-bit request into the native structure */
	kfw.iocnum = iocnum;
	kfw.fwlen = kfw32.fwlen;
	kfw.bufp = compat_ptr(kfw32.bufp);

	ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);

	mutex_unlock(&iocp->ioctl_cmds.mutex);

	return ret;
}

/* 32-bit compat entry for MPTCOMMAND: widens mpt_ioctl_command32 into
 * the native mpt_ioctl_command and forwards to mptctl_do_mpt_command().
 */
static int
compat_mpt_command(struct file *filp, unsigned int cmd,
			unsigned long arg)
{
	struct mpt_ioctl_command32 karg32;
	struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
	struct mpt_ioctl_command karg;
	MPT_ADAPTER *iocp = NULL;
	int iocnum, iocnumX;
	int nonblock = (filp->f_flags & O_NONBLOCK);
	int ret;

	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
		return -EFAULT;

	/* Verify intended MPT adapter */
	iocnumX = karg32.hdr.iocnum & 0xFF;
	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
	    (iocp == NULL)) {
		printk(KERN_DEBUG MYNAM
			"::compat_mpt_command @%d - ioc%d not found!\n",
				__LINE__, iocnumX);
		return -ENODEV;
	}

	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
		return ret;

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n",
	    iocp->name));
	/* Copy data to karg */
	karg.hdr.iocnum = karg32.hdr.iocnum;
	karg.hdr.port = karg32.hdr.port;
	karg.timeout = karg32.timeout;
	karg.maxReplyBytes = karg32.maxReplyBytes;

	karg.dataInSize = karg32.dataInSize;
	karg.dataOutSize = karg32.dataOutSize;
	karg.maxSenseBytes = karg32.maxSenseBytes;
	karg.dataSgeOffset = karg32.dataSgeOffset;

	/* user pointers arrive as 32-bit values; widen via unsigned long */
	karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
	karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
	karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
	karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;

	/* Pass new structure to do_mpt_command
	 */
	ret = mptctl_do_mpt_command (karg, &uarg->MF);

	mutex_unlock(&iocp->ioctl_cmds.mutex);

	return ret;
}

/* compat_ioctl dispatcher: layout-identical commands go straight to the
 * native handler; only MPTCOMMAND32/MPTFWDOWNLOAD32 need translation.
 * Runs under the Big Kernel Lock (pre-BKL-removal kernel).
 */
static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	long ret;
	lock_kernel();
	switch (cmd) {
	case MPTIOCINFO:
	case MPTIOCINFO1:
	case MPTIOCINFO2:
	case MPTTARGETINFO:
	case MPTEVENTQUERY:
	case MPTEVENTENABLE:
	case MPTEVENTREPORT:
	case MPTHARDRESET:
	case HP_GETHOSTINFO:
	case HP_GETTARGETINFO:
	case MPTTEST:
		ret = __mptctl_ioctl(f, cmd, arg);
		break;
	case MPTCOMMAND32:
		ret = compat_mpt_command(f, cmd, arg);
		break;
	case MPTFWDOWNLOAD32:
		ret = compat_mptfwxfer_ioctl(f, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	unlock_kernel();
	return ret;
}

#endif


/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptctl_probe - Installs ioctl devices per bus.
 *	@pdev: Pointer to pci_dev structure
 *
 *	Initializes the per-adapter ioctl command mutex/completion.
 *
 *	Returns 0 for success, non-zero for failure.
 *
 */

static int
mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);

	mutex_init(&ioc->ioctl_cmds.mutex);
	init_completion(&ioc->ioctl_cmds.done);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptctl_remove - Removed ioctl devices
 *	@pdev: Pointer to pci_dev structure
 *
 *	Intentionally empty: the misc device is global, not per-adapter.
 *
 */
static void
mptctl_remove(struct pci_dev *pdev)
{
}

static struct mpt_pci_driver mptctl_driver = {
  .probe		= mptctl_probe,
  .remove		= mptctl_remove,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Module init: registers the /dev/mptctl misc device, then the reply,
 * task-management, reset and event callbacks with the MPT base driver.
 * Each failure path unwinds everything registered before it.
 */
static int __init mptctl_init(void)
{
	int err;
	int where = 1;

	show_mptmod_ver(my_NAME, my_VERSION);

	mpt_device_driver_register(&mptctl_driver, MPTCTL_DRIVER);

	/* Register this device */
	err = misc_register(&mptctl_miscdev);
	if (err < 0) {
		printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR);
		goto out_fail;
	}
	printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n");
	printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n",
			 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);

	/*
	 *  Install our handler
	 */
	++where;
	mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER);
	if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) {
		printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
		misc_deregister(&mptctl_miscdev);
		err = -EBUSY;
		goto out_fail;
	}

	mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
	if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
		printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
		mpt_deregister(mptctl_id);
		misc_deregister(&mptctl_miscdev);
		err = -EBUSY;
		goto out_fail;
	}

	mpt_reset_register(mptctl_id, mptctl_ioc_reset);
	mpt_event_register(mptctl_id, mptctl_event_process);

	return 0;

out_fail:

	mpt_device_driver_deregister(MPTCTL_DRIVER);

	return err;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Module exit: tear down in the reverse order of mptctl_init(). */
static void mptctl_exit(void)
{
	misc_deregister(&mptctl_miscdev);
	printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
			 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);

	/* De-register event handler from base module */
	mpt_event_deregister(mptctl_id);

	/* De-register reset handler from base module */
	mpt_reset_deregister(mptctl_id);

	/* De-register callback handler from base module */
	mpt_deregister(mptctl_taskmgmt_id);
	mpt_deregister(mptctl_id);

	mpt_device_driver_deregister(MPTCTL_DRIVER);

}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

module_init(mptctl_init);
module_exit(mptctl_exit);
gpl-2.0
LightningZap/sgs4g_lz_kernel
sound/pci/ad1889.c
1071
27050
/* Analog Devices 1889 audio driver
 *
 * This is a driver for the AD1889 PCI audio chipset found
 * on the HP PA-RISC [BCJ]-xxx0 workstations.
 *
 * Copyright (C) 2004-2005, Kyle McMartin <kyle@parisc-linux.org>
 * Copyright (C) 2005, Thibaut Varene <varenet@parisc-linux.org>
 * Based on the OSS AD1889 driver by Randolph Chung <tausq@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO:
 *	Do we need to take care of CCS register?
 *	Maybe we could use finer grained locking (separate locks for pb/cap)?
 * Wishlist:
 *	Control Interface (mixer) support
 *	Better AC97 support (VSR...)?
 *	PM support
 *	MIDI support
 *	Game Port support
 *	SG DMA support (this will need *alot* of work)
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/ac97_codec.h>

#include <asm/io.h>

#include "ad1889.h"
#include "ac97/ac97_id.h"

#define	AD1889_DRVVER	"Version: 1.7"

MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>, Thibaut Varene <t-bone@parisc-linux.org>");
MODULE_DESCRIPTION("Analog Devices AD1889 ALSA sound driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1889}}");

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the AD1889 soundcard.");

static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the AD1889 soundcard.");

static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable AD1889 soundcard.");

static char *ac97_quirk[SNDRV_CARDS];
module_param_array(ac97_quirk, charp, NULL, 0444);
MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware.");

#define DEVNAME "ad1889"
#define PFX	DEVNAME ": "

/* let's use the global sound debug interfaces */
#define ad1889_debug(fmt, arg...) snd_printd(KERN_DEBUG fmt, ## arg)

/* keep track of some hw registers */
struct ad1889_register_state {
	u16 reg;		/* reg setup */
	u32 addr;		/* dma base address */
	unsigned long size;	/* DMA buffer size */
};

struct snd_ad1889 {
	struct snd_card *card;
	struct pci_dev *pci;

	int irq;
	unsigned long bar;
	void __iomem *iobase;

	struct snd_ac97 *ac97;
	struct snd_ac97_bus *ac97_bus;
	struct snd_pcm *pcm;
	struct snd_info_entry *proc;

	struct snd_pcm_substream *psubs;
	struct snd_pcm_substream *csubs;

	/* playback register state */
	struct ad1889_register_state wave;
	struct ad1889_register_state ramc;

	spinlock_t lock;
};

/* MMIO register accessors; all chip registers live in the BAR0 window */
static inline u16
ad1889_readw(struct snd_ad1889 *chip, unsigned reg)
{
	return readw(chip->iobase + reg);
}

static inline void
ad1889_writew(struct snd_ad1889 *chip, unsigned reg, u16 val)
{
	writew(val, chip->iobase + reg);
}

static inline u32
ad1889_readl(struct snd_ad1889 *chip, unsigned reg)
{
	return readl(chip->iobase + reg);
}

static inline void
ad1889_writel(struct snd_ad1889 *chip, unsigned reg, u32 val)
{
	writel(val, chip->iobase + reg);
}

/* Clear both wave-out attenuator mute bits; trailing read flushes the
 * posted write. */
static inline void
ad1889_unmute(struct snd_ad1889 *chip)
{
	u16 st;
	st = ad1889_readw(chip, AD_DS_WADA) &
		~(AD_DS_WADA_RWAM | AD_DS_WADA_LWAM);
	ad1889_writew(chip, AD_DS_WADA, st);
	ad1889_readw(chip, AD_DS_WADA);
}

/* Set both wave-out attenuator mute bits; trailing read flushes the
 * posted write. */
static inline void
ad1889_mute(struct snd_ad1889 *chip)
{
	u16 st;
	st = ad1889_readw(chip, AD_DS_WADA) | AD_DS_WADA_RWAM | AD_DS_WADA_LWAM;
	ad1889_writew(chip, AD_DS_WADA, st);
	ad1889_readw(chip, AD_DS_WADA);
}

/* The chip keeps a base and a current register for each DMA channel;
 * both are loaded so the current pointer starts at the base. */
static inline void
ad1889_load_adc_buffer_address(struct snd_ad1889 *chip, u32 address)
{
	ad1889_writel(chip, AD_DMA_ADCBA, address);
	ad1889_writel(chip, AD_DMA_ADCCA, address);
}

static inline void
ad1889_load_adc_buffer_count(struct snd_ad1889 *chip, u32 count)
{
	ad1889_writel(chip, AD_DMA_ADCBC, count);
	ad1889_writel(chip, AD_DMA_ADCCC, count);
}

static inline void
ad1889_load_adc_interrupt_count(struct snd_ad1889 *chip, u32 count)
{
	ad1889_writel(chip, AD_DMA_ADCIB, count);
	ad1889_writel(chip, AD_DMA_ADCIC, count);
}

static inline void
ad1889_load_wave_buffer_address(struct snd_ad1889 *chip, u32 address)
{
	ad1889_writel(chip, AD_DMA_WAVBA, address);
	ad1889_writel(chip, AD_DMA_WAVCA, address);
}

static inline void
ad1889_load_wave_buffer_count(struct snd_ad1889 *chip, u32 count)
{
	ad1889_writel(chip, AD_DMA_WAVBC, count);
	ad1889_writel(chip, AD_DMA_WAVCC, count);
}

static inline void
ad1889_load_wave_interrupt_count(struct snd_ad1889 *chip, u32 count)
{
	ad1889_writel(chip, AD_DMA_WAVIB, count);
	ad1889_writel(chip, AD_DMA_WAVIC, count);
}

/* Stop the selected channel(s) (AD_CHAN_WAV and/or AD_CHAN_ADC),
 * disable their DMA interrupts and zero all DMA counters/pointers. */
static void
ad1889_channel_reset(struct snd_ad1889 *chip, unsigned int channel)
{
	u16 reg;

	if (channel & AD_CHAN_WAV) {
		/* Disable wave channel */
		reg = ad1889_readw(chip, AD_DS_WSMC) & ~AD_DS_WSMC_WAEN;
		ad1889_writew(chip, AD_DS_WSMC, reg);
		chip->wave.reg = reg;

		/* disable IRQs */
		reg = ad1889_readw(chip, AD_DMA_WAV);
		reg &= AD_DMA_IM_DIS;
		reg &= ~AD_DMA_LOOP;
		ad1889_writew(chip, AD_DMA_WAV, reg);

		/* clear IRQ and address counters and pointers */
		ad1889_load_wave_buffer_address(chip, 0x0);
		ad1889_load_wave_buffer_count(chip, 0x0);
		ad1889_load_wave_interrupt_count(chip, 0x0);

		/* flush */
		ad1889_readw(chip, AD_DMA_WAV);
	}

	if (channel & AD_CHAN_ADC) {
		/* Disable ADC channel */
		reg = ad1889_readw(chip, AD_DS_RAMC) & ~AD_DS_RAMC_ADEN;
		ad1889_writew(chip, AD_DS_RAMC, reg);
		chip->ramc.reg = reg;

		reg = ad1889_readw(chip, AD_DMA_ADC);
		reg &= AD_DMA_IM_DIS;
		reg &= ~AD_DMA_LOOP;
		ad1889_writew(chip, AD_DMA_ADC, reg);

		ad1889_load_adc_buffer_address(chip, 0x0);
		ad1889_load_adc_buffer_count(chip, 0x0);
		ad1889_load_adc_interrupt_count(chip, 0x0);

		/* flush */
		ad1889_readw(chip, AD_DMA_ADC);
	}
}

/* AC'97 codec registers are memory-mapped at AD_AC97_BASE */
static u16
snd_ad1889_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
	struct snd_ad1889 *chip = ac97->private_data;
	return ad1889_readw(chip, AD_AC97_BASE + reg);
}

static void
snd_ad1889_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
		      unsigned short val)
{
	struct snd_ad1889 *chip = ac97->private_data;
	ad1889_writew(chip, AD_AC97_BASE + reg, val);
}

/* Poll for AC-link readiness for up to 400 ms; returns 0 on success,
 * -EIO if the link never came up. */
static int
snd_ad1889_ac97_ready(struct snd_ad1889 *chip)
{
	int retry = 400; /* average needs 352 msec */

	while (!(ad1889_readw(chip, AD_AC97_ACIC) & AD_AC97_ACIC_ACRDY)
			&& --retry)
		mdelay(1);
	if (!retry) {
		snd_printk(KERN_ERR PFX "[%s] Link is not ready.\n",
			   __func__);
		return -EIO;
	}
	ad1889_debug("[%s] ready after %d ms\n", __func__, 400 - retry);

	return 0;
}

static int
snd_ad1889_hw_params(struct snd_pcm_substream *substream,
		     struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
}

static int
snd_ad1889_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

static struct snd_pcm_hardware snd_ad1889_playback_hw = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_BLOCK_TRANSFER,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min = 8000,	/* docs say 7000, but we're lazy */
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = BUFFER_BYTES_MAX,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = PERIOD_BYTES_MAX,
	.periods_min = PERIODS_MIN,
	.periods_max = PERIODS_MAX,
	/*.fifo_size = 0,*/
};

static struct snd_pcm_hardware snd_ad1889_capture_hw = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_BLOCK_TRANSFER,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,	/* docs say we could to VSR, but we're lazy */
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = BUFFER_BYTES_MAX,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = PERIOD_BYTES_MAX,
	.periods_min = PERIODS_MIN,
	.periods_max = PERIODS_MAX,
	/*.fifo_size = 0,*/
};

static int
snd_ad1889_playback_open(struct snd_pcm_substream *ss)
{
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);
	struct snd_pcm_runtime *rt = ss->runtime;

	chip->psubs = ss;
	rt->hw = snd_ad1889_playback_hw;

	return 0;
}

static int
snd_ad1889_capture_open(struct snd_pcm_substream *ss)
{
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);
	struct snd_pcm_runtime *rt = ss->runtime;

	chip->csubs = ss;
	rt->hw = snd_ad1889_capture_hw;

	return 0;
}

static int
snd_ad1889_playback_close(struct snd_pcm_substream *ss)
{
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);
	chip->psubs = NULL;
	return 0;
}

static int
snd_ad1889_capture_close(struct snd_pcm_substream *ss)
{
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);
	chip->csubs = NULL;
	return 0;
}

/* Program format, rate and DMA registers for playback; DMA is not
 * started here (trigger does that). */
static int
snd_ad1889_playback_prepare(struct snd_pcm_substream *ss)
{
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);
	struct snd_pcm_runtime *rt = ss->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(ss);
	unsigned int count = snd_pcm_lib_period_bytes(ss);
	u16 reg;

	ad1889_channel_reset(chip, AD_CHAN_WAV);

	reg = ad1889_readw(chip, AD_DS_WSMC);

	/* Mask out 16-bit / Stereo */
	reg &= ~(AD_DS_WSMC_WA16 | AD_DS_WSMC_WAST);

	if (snd_pcm_format_width(rt->format) == 16)
		reg |= AD_DS_WSMC_WA16;

	if (rt->channels > 1)
		reg |= AD_DS_WSMC_WAST;

	/* let's make sure we don't clobber ourselves */
	spin_lock_irq(&chip->lock);

	chip->wave.size = size;
	chip->wave.reg = reg;
	chip->wave.addr = rt->dma_addr;

	ad1889_writew(chip, AD_DS_WSMC, chip->wave.reg);

	/* Set sample rates on the codec */
	ad1889_writew(chip, AD_DS_WAS, rt->rate);

	/* Set up DMA */
	ad1889_load_wave_buffer_address(chip, chip->wave.addr);
	ad1889_load_wave_buffer_count(chip, size);
	ad1889_load_wave_interrupt_count(chip, count);

	/* writes flush */
	ad1889_readw(chip, AD_DS_WSMC);

	spin_unlock_irq(&chip->lock);

	ad1889_debug("prepare playback: addr = 0x%x, count = %u, "
			"size = %u, reg = 0x%x, rate = %u\n", chip->wave.addr,
			count, size, reg, rt->rate);
	return 0;
}

/* Program format, rate and DMA registers for capture; DMA is not
 * started here (trigger does that). */
static int
snd_ad1889_capture_prepare(struct snd_pcm_substream *ss)
{
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);
	struct snd_pcm_runtime *rt = ss->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(ss);
	unsigned int count = snd_pcm_lib_period_bytes(ss);
	u16 reg;

	ad1889_channel_reset(chip, AD_CHAN_ADC);

	reg = ad1889_readw(chip, AD_DS_RAMC);

	/* Mask out 16-bit / Stereo */
	reg &= ~(AD_DS_RAMC_AD16 | AD_DS_RAMC_ADST);

	if (snd_pcm_format_width(rt->format) == 16)
		reg |= AD_DS_RAMC_AD16;

	if (rt->channels > 1)
		reg |= AD_DS_RAMC_ADST;

	/* let's make sure we don't clobber ourselves */
	spin_lock_irq(&chip->lock);

	chip->ramc.size = size;
	chip->ramc.reg = reg;
	chip->ramc.addr = rt->dma_addr;

	ad1889_writew(chip, AD_DS_RAMC, chip->ramc.reg);

	/* Set up DMA */
	ad1889_load_adc_buffer_address(chip, chip->ramc.addr);
	ad1889_load_adc_buffer_count(chip, size);
	ad1889_load_adc_interrupt_count(chip, count);

	/* writes flush */
	ad1889_readw(chip, AD_DS_RAMC);

	spin_unlock_irq(&chip->lock);

	ad1889_debug("prepare capture: addr = 0x%x, count = %u, "
			"size = %u, reg = 0x%x, rate = %u\n", chip->ramc.addr,
			count, size, reg, rt->rate);
	return 0;
}

/* this is called in atomic context with IRQ disabled.
   Must be as fast as possible and not sleep.
   DMA should be *triggered* by this call.
   The WSMC "WAEN" bit triggers DMA Wave On/Off */
static int
snd_ad1889_playback_trigger(struct snd_pcm_substream *ss, int cmd)
{
	u16 wsmc;
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);

	wsmc = ad1889_readw(chip, AD_DS_WSMC);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* enable DMA loop & interrupts */
		ad1889_writew(chip, AD_DMA_WAV, AD_DMA_LOOP | AD_DMA_IM_CNT);
		wsmc |= AD_DS_WSMC_WAEN;
		/* 1 to clear CHSS bit */
		ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_WAVS);
		ad1889_unmute(chip);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		ad1889_mute(chip);
		wsmc &= ~AD_DS_WSMC_WAEN;
		break;
	default:
		snd_BUG();
		return -EINVAL;
	}

	chip->wave.reg = wsmc;
	ad1889_writew(chip, AD_DS_WSMC, wsmc);
	ad1889_readw(chip, AD_DS_WSMC);	/* flush */

	/* reset the chip when STOP - will disable IRQs */
	if (cmd == SNDRV_PCM_TRIGGER_STOP)
		ad1889_channel_reset(chip, AD_CHAN_WAV);

	return 0;
}

/* this is called in atomic context with IRQ disabled.
   Must be as fast as possible and not sleep.
   DMA should be *triggered* by this call.
   The RAMC "ADEN" bit triggers DMA ADC On/Off */
static int
snd_ad1889_capture_trigger(struct snd_pcm_substream *ss, int cmd)
{
	u16 ramc;
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);

	ramc = ad1889_readw(chip, AD_DS_RAMC);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* enable DMA loop & interrupts */
		ad1889_writew(chip, AD_DMA_ADC, AD_DMA_LOOP | AD_DMA_IM_CNT);
		ramc |= AD_DS_RAMC_ADEN;
		/* 1 to clear CHSS bit */
		ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_ADCS);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		ramc &= ~AD_DS_RAMC_ADEN;
		break;
	default:
		return -EINVAL;
	}

	chip->ramc.reg = ramc;
	ad1889_writew(chip, AD_DS_RAMC, ramc);
	ad1889_readw(chip, AD_DS_RAMC);	/* flush */

	/* reset the chip when STOP - will disable IRQs */
	if (cmd == SNDRV_PCM_TRIGGER_STOP)
		ad1889_channel_reset(chip, AD_CHAN_ADC);

	return 0;
}

/* Called in atomic context with IRQ disabled */
static snd_pcm_uframes_t
snd_ad1889_playback_pointer(struct snd_pcm_substream *ss)
{
	size_t ptr = 0;
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);

	if (unlikely(!(chip->wave.reg & AD_DS_WSMC_WAEN)))
		return 0;

	ptr = ad1889_readl(chip, AD_DMA_WAVCA);
	ptr -= chip->wave.addr;

	if (snd_BUG_ON(ptr >= chip->wave.size))
		return 0;

	return bytes_to_frames(ss->runtime, ptr);
}

/* Called in atomic context with IRQ disabled */
static snd_pcm_uframes_t
snd_ad1889_capture_pointer(struct snd_pcm_substream *ss)
{
	size_t ptr = 0;
	struct snd_ad1889 *chip = snd_pcm_substream_chip(ss);

	if (unlikely(!(chip->ramc.reg & AD_DS_RAMC_ADEN)))
		return 0;

	ptr = ad1889_readl(chip, AD_DMA_ADCCA);
	ptr -= chip->ramc.addr;

	if (snd_BUG_ON(ptr >= chip->ramc.size))
		return 0;

	return bytes_to_frames(ss->runtime, ptr);
}

static struct snd_pcm_ops snd_ad1889_playback_ops = {
	.open = snd_ad1889_playback_open,
	.close = snd_ad1889_playback_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_ad1889_hw_params,
	.hw_free = snd_ad1889_hw_free,
	.prepare = snd_ad1889_playback_prepare,
	.trigger = snd_ad1889_playback_trigger,
	.pointer = snd_ad1889_playback_pointer,
};

static struct snd_pcm_ops snd_ad1889_capture_ops = {
	.open = snd_ad1889_capture_open,
	.close = snd_ad1889_capture_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_ad1889_hw_params,
	.hw_free = snd_ad1889_hw_free,
	.prepare = snd_ad1889_capture_prepare,
	.trigger = snd_ad1889_capture_trigger,
	.pointer = snd_ad1889_capture_pointer,
};

/* Shared-IRQ handler: ack the DMA interrupt status and kick the PCM
 * layer for whichever stream(s) completed a period. */
static irqreturn_t
snd_ad1889_interrupt(int irq, void *dev_id)
{
	unsigned long st;
	struct snd_ad1889 *chip = dev_id;

	st = ad1889_readl(chip, AD_DMA_DISR);

	/* clear ISR */
	ad1889_writel(chip, AD_DMA_DISR, st);

	st &= AD_INTR_MASK;

	if (unlikely(!st))
		return IRQ_NONE;

	if (st & (AD_DMA_DISR_PMAI|AD_DMA_DISR_PTAI))
		ad1889_debug("Unexpected master or target abort interrupt!\n");

	if ((st & AD_DMA_DISR_WAVI) && chip->psubs)
		snd_pcm_period_elapsed(chip->psubs);
	if ((st & AD_DMA_DISR_ADCI) && chip->csubs)
		snd_pcm_period_elapsed(chip->csubs);

	return IRQ_HANDLED;
}

/* Create the PCM device (one playback + one capture substream) and
 * preallocate DMA buffers for it. */
static int __devinit
snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm)
{
	int err;
	struct snd_pcm *pcm;

	if (rpcm)
		*rpcm = NULL;

	err = snd_pcm_new(chip->card, chip->card->driver, device, 1, 1, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
			&snd_ad1889_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
			&snd_ad1889_capture_ops);

	pcm->private_data = chip;
	pcm->info_flags = 0;
	strcpy(pcm->name, chip->card->shortname);

	chip->pcm = pcm;
	chip->psubs = NULL;
	chip->csubs = NULL;

	err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
						    snd_dma_pci_data(chip->pci),
						    BUFFER_BYTES_MAX / 2,
						    BUFFER_BYTES_MAX);

	if (err < 0) {
		snd_printk(KERN_ERR PFX "buffer allocation error: %d\n", err);
		return err;
	}

	if (rpcm)
		*rpcm = pcm;

	return 0;
}

/* /proc read callback: dump a human-readable summary of the DirectSound
 * mixer registers (WSMC/RAMC/WADA/WAS/RES). */
static void
snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct snd_ad1889 *chip = entry->private_data;
	u16 reg;
	int tmp;

	reg = ad1889_readw(chip, AD_DS_WSMC);
	snd_iprintf(buffer, "Wave output: %s\n",
			(reg & AD_DS_WSMC_WAEN) ? "enabled" : "disabled");
	snd_iprintf(buffer, "Wave Channels: %s\n",
			(reg & AD_DS_WSMC_WAST) ? "stereo" : "mono");
	snd_iprintf(buffer, "Wave Quality: %d-bit linear\n",
			(reg & AD_DS_WSMC_WA16) ? 16 : 8);

	/* WARQ is at offset 12 */
	/* note: mask before shifting — "& MASK >> 12" would parse as
	 * "& (MASK >> 12)" and test the wrong bit */
	tmp = (reg & AD_DS_WSMC_WARQ) ?
		((((reg & AD_DS_WSMC_WARQ) >> 12) & 0x01) ? 12 : 18) : 4;
	tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1;

	snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp,
			(reg & AD_DS_WSMC_WAST) ? "stereo" : "mono");

	snd_iprintf(buffer, "Synthesis output: %s\n",
			reg & AD_DS_WSMC_SYEN ? "enabled" : "disabled");

	/* SYRQ is at offset 4 */
	tmp = (reg & AD_DS_WSMC_SYRQ) ?
		((((reg & AD_DS_WSMC_SYRQ) >> 4) & 0x01) ? 12 : 18) : 4;
	tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1;

	snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp,
			(reg & AD_DS_WSMC_WAST) ? "stereo" : "mono");

	reg = ad1889_readw(chip, AD_DS_RAMC);
	snd_iprintf(buffer, "ADC input: %s\n",
			(reg & AD_DS_RAMC_ADEN) ? "enabled" : "disabled");
	snd_iprintf(buffer, "ADC Channels: %s\n",
			(reg & AD_DS_RAMC_ADST) ? "stereo" : "mono");
	snd_iprintf(buffer, "ADC Quality: %d-bit linear\n",
			(reg & AD_DS_RAMC_AD16) ? 16 : 8);

	/* ACRQ is at offset 4 */
	tmp = (reg & AD_DS_RAMC_ACRQ) ?
		((((reg & AD_DS_RAMC_ACRQ) >> 4) & 0x01) ? 12 : 18) : 4;
	tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1;

	snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp,
			(reg & AD_DS_RAMC_ADST) ? "stereo" : "mono");

	snd_iprintf(buffer, "Resampler input: %s\n",
			reg & AD_DS_RAMC_REEN ? "enabled" : "disabled");

	/* RERQ is at offset 12 */
	tmp = (reg & AD_DS_RAMC_RERQ) ?
		((((reg & AD_DS_RAMC_RERQ) >> 12) & 0x01) ? 12 : 18) : 4;
	tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1;

	/* reg holds RAMC here, so test the RAMC stereo bit (the original
	 * tested AD_DS_WSMC_WAST — a copy-paste slip) */
	snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp,
			(reg & AD_DS_RAMC_ADST) ? "stereo" : "mono");

	/* doc says LSB represents -1.5dB, but the max value (-94.5dB)
	   suggests that LSB is -3dB, which is more coherent with the
	   logarithmic nature of the dB scale */
	reg = ad1889_readw(chip, AD_DS_WADA);
	snd_iprintf(buffer, "Left: %s, -%d dB\n",
			(reg & AD_DS_WADA_LWAM) ? "mute" : "unmute",
			((reg & AD_DS_WADA_LWAA) >> 8) * 3);
	reg = ad1889_readw(chip, AD_DS_WADA);
	snd_iprintf(buffer, "Right: %s, -%d dB\n",
			(reg & AD_DS_WADA_RWAM) ? "mute" : "unmute",
			((reg & AD_DS_WADA_RWAA) >> 8) * 3);

	reg = ad1889_readw(chip, AD_DS_WAS);
	snd_iprintf(buffer, "Wave samplerate: %u Hz\n", reg);
	reg = ad1889_readw(chip, AD_DS_RES);
	snd_iprintf(buffer, "Resampler samplerate: %u Hz\n", reg);
}

static void __devinit
snd_ad1889_proc_init(struct snd_ad1889 *chip)
{
	struct snd_info_entry *entry;

	if (!snd_card_proc_new(chip->card, chip->card->driver, &entry))
		snd_info_set_text_ops(entry, chip, snd_ad1889_proc_read);
}

static struct ac97_quirk ac97_quirks[] = {
	{
		.subvendor = 0x11d4,	/* AD */
		.subdevice = 0x1889,	/* AD1889 */
		.codec_id = AC97_ID_AD1819,
		.name = "AD1889",
		.type = AC97_TUNE_HP_ONLY
	},
	{ } /* terminator */
};

/* Bring the AC-link out of reset, enable the interface and wait for the
 * codec to report ready, then enable audio stream output + VSR mode. */
static void __devinit
snd_ad1889_ac97_xinit(struct snd_ad1889 *chip)
{
	u16 reg;

	reg = ad1889_readw(chip, AD_AC97_ACIC);
	reg |= AD_AC97_ACIC_ACRD;		/* Reset Disable */
	ad1889_writew(chip, AD_AC97_ACIC, reg);
	ad1889_readw(chip, AD_AC97_ACIC);	/* flush posted write */
	udelay(10);
	/* Interface Enable */
	reg |= AD_AC97_ACIC_ACIE;
	ad1889_writew(chip, AD_AC97_ACIC, reg);

	snd_ad1889_ac97_ready(chip);

	/* Audio Stream Output | Variable Sample Rate Mode */
	reg = ad1889_readw(chip, AD_AC97_ACIC);
	reg |= AD_AC97_ACIC_ASOE | AD_AC97_ACIC_VSRM;
	ad1889_writew(chip, AD_AC97_ACIC, reg);
	ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */
}

static void
snd_ad1889_ac97_bus_free(struct snd_ac97_bus *bus)
{
	struct snd_ad1889 *chip = bus->private_data;
	chip->ac97_bus = NULL;
}

static void
snd_ad1889_ac97_free(struct snd_ac97 *ac97)
{
	struct snd_ad1889 *chip = ac97->private_data;
	chip->ac97 = NULL;
}

/* Register the AC'97 bus + mixer and apply any board quirk. */
static int __devinit
snd_ad1889_ac97_init(struct snd_ad1889 *chip, const char *quirk_override)
{
	int err;
	struct snd_ac97_template ac97;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_ad1889_ac97_write,
		.read = snd_ad1889_ac97_read,
	};

	/* doing that here, it works. */
	snd_ad1889_ac97_xinit(chip);

	err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
	if (err < 0)
		return err;

	chip->ac97_bus->private_free = snd_ad1889_ac97_bus_free;

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	ac97.private_free = snd_ad1889_ac97_free;
	ac97.pci = chip->pci;

	err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
	if (err < 0)
		return err;

	snd_ac97_tune_hardware(chip->ac97, ac97_quirks, quirk_override);

	return 0;
}

/* Quiesce the hardware (if it was brought up), release the IRQ, the
 * MMIO mapping and the PCI resources, and free the chip struct. */
static int
snd_ad1889_free(struct snd_ad1889 *chip)
{
	if (chip->irq < 0)
		goto skip_hw;

	spin_lock_irq(&chip->lock);

	ad1889_mute(chip);

	/* Turn off interrupt on count and zero DMA registers */
	ad1889_channel_reset(chip, AD_CHAN_WAV | AD_CHAN_ADC);

	/* clear DISR. If we don't, we'd better jump off the Eiffel Tower */
	ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PTAI | AD_DMA_DISR_PMAI);
	ad1889_readl(chip, AD_DMA_DISR);	/* flush, dammit! */

	spin_unlock_irq(&chip->lock);

	if (chip->irq >= 0)
		free_irq(chip->irq, chip);

skip_hw:
	if (chip->iobase)
		iounmap(chip->iobase);

	pci_release_regions(chip->pci);
	pci_disable_device(chip->pci);

	kfree(chip);
	return 0;
}

static int
snd_ad1889_dev_free(struct snd_device *device)
{
	struct snd_ad1889 *chip = device->device_data;
	return snd_ad1889_free(chip);
}

static int __devinit
snd_ad1889_init(struct snd_ad1889 *chip)
{
	ad1889_writew(chip, AD_DS_CCS, AD_DS_CCS_CLKEN);	/* turn on clock */
	ad1889_readw(chip, AD_DS_CCS);	/* flush posted write */

	mdelay(10);

	/* enable Master and Target abort interrupts */
	ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PMAE | AD_DMA_DISR_PTAE);

	return 0;
}

/* Allocate and set up the chip: PCI enable, 32-bit DMA mask, BAR0
 * mapping, IRQ, hardware init and snd_device registration. */
static int __devinit
snd_ad1889_create(struct snd_card *card,
		  struct pci_dev *pci,
		  struct snd_ad1889 **rchip)
{
	int err;

	struct snd_ad1889 *chip;
	static struct snd_device_ops ops = {
		.dev_free = snd_ad1889_dev_free,
	};

	*rchip = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	/* check PCI availability (32bit DMA) */
	if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
		printk(KERN_ERR PFX "error setting 32-bit DMA mask.\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	/* allocate chip specific data with zero-filled memory */
	if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	card->private_data = chip;
	chip->pci = pci;
	chip->irq = -1;

	/* (1) PCI resource allocation */
	if ((err = pci_request_regions(pci, card->driver)) < 0)
		goto free_and_ret;

	chip->bar = pci_resource_start(pci, 0);
	chip->iobase = pci_ioremap_bar(pci, 0);
	if (chip->iobase == NULL) {
		printk(KERN_ERR PFX "unable to reserve region.\n");
		err = -EBUSY;
		goto free_and_ret;
	}

	pci_set_master(pci);

	spin_lock_init(&chip->lock);	/* only now can we call ad1889_free */

	if (request_irq(pci->irq, snd_ad1889_interrupt,
			IRQF_SHARED, card->driver, chip)) {
		printk(KERN_ERR PFX "cannot obtain IRQ %d\n", pci->irq);
		snd_ad1889_free(chip);
		return -EBUSY;
	}

	chip->irq = pci->irq;
	synchronize_irq(chip->irq);

	/* (2) initialization of the chip hardware */
	if ((err = snd_ad1889_init(chip)) < 0) {
		snd_ad1889_free(chip);
		return err;
	}

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_ad1889_free(chip);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*rchip = chip;

	return 0;

free_and_ret:
	kfree(chip);
	pci_disable_device(pci);

	return err;
}

/* PCI probe: create the card, the chip, the AC'97 mixer, the PCM device
 * and the proc entry, then register the card. */
static int __devinit
snd_ad1889_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	int err;
	static int devno;
	struct snd_card *card;
	struct snd_ad1889 *chip;

	/* (1) */
	if (devno >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[devno]) {
		devno++;
		return -ENOENT;
	}

	/* (2) */
	err = snd_card_create(index[devno], id[devno], THIS_MODULE, 0, &card);
	/* XXX REVISIT: we can probably allocate chip in this call */
	if (err < 0)
		return err;

	strcpy(card->driver, "AD1889");
	strcpy(card->shortname, "Analog Devices AD1889");

	/* (3) */
	err = snd_ad1889_create(card, pci, &chip);
	if (err < 0)
		goto free_and_ret;

	/* (4) */
	sprintf(card->longname, "%s at 0x%lx irq %i",
		card->shortname, chip->bar, chip->irq);

	/* (5) */
	/* register AC97 mixer */
	err = snd_ad1889_ac97_init(chip, ac97_quirk[devno]);
	if (err < 0)
		goto free_and_ret;

	err = snd_ad1889_pcm_init(chip, 0, NULL);
	if (err < 0)
		goto free_and_ret;

	/* register proc interface */
	snd_ad1889_proc_init(chip);

	/* (6) */
	err = snd_card_register(card);
	if (err < 0)
		goto free_and_ret;

	/* (7) */
	pci_set_drvdata(pci, card);

	devno++;
	return 0;

free_and_ret:
	snd_card_free(card);
	return err;
}

static void __devexit
snd_ad1889_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(snd_ad1889_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, snd_ad1889_ids);

static struct pci_driver ad1889_pci_driver = {
	.name = "AD1889 Audio",
	.id_table = snd_ad1889_ids,
	.probe = snd_ad1889_probe,
	.remove = __devexit_p(snd_ad1889_remove),
};

static int __init
alsa_ad1889_init(void)
{
	return pci_register_driver(&ad1889_pci_driver);
}

static void __exit
alsa_ad1889_fini(void)
{
	pci_unregister_driver(&ad1889_pci_driver);
}

module_init(alsa_ad1889_init);
module_exit(alsa_ad1889_fini);
gpl-2.0
Bdaman80/BD-Ace2
sound/pci/ad1889.c
1071
27050
/* Analog Devices 1889 audio driver * * This is a driver for the AD1889 PCI audio chipset found * on the HP PA-RISC [BCJ]-xxx0 workstations. * * Copyright (C) 2004-2005, Kyle McMartin <kyle@parisc-linux.org> * Copyright (C) 2005, Thibaut Varene <varenet@parisc-linux.org> * Based on the OSS AD1889 driver by Randolph Chung <tausq@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * TODO: * Do we need to take care of CCS register? * Maybe we could use finer grained locking (separate locks for pb/cap)? * Wishlist: * Control Interface (mixer) support * Better AC97 support (VSR...)? 
* PM support * MIDI support * Game Port support * SG DMA support (this will need *alot* of work) */ #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/compiler.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include <asm/io.h> #include "ad1889.h" #include "ac97/ac97_id.h" #define AD1889_DRVVER "Version: 1.7" MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>, Thibaut Varene <t-bone@parisc-linux.org>"); MODULE_DESCRIPTION("Analog Devices AD1889 ALSA sound driver"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1889}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the AD1889 soundcard."); static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the AD1889 soundcard."); static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable AD1889 soundcard."); static char *ac97_quirk[SNDRV_CARDS]; module_param_array(ac97_quirk, charp, NULL, 0444); MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware."); #define DEVNAME "ad1889" #define PFX DEVNAME ": " /* let's use the global sound debug interfaces */ #define ad1889_debug(fmt, arg...) 
snd_printd(KERN_DEBUG fmt, ## arg) /* keep track of some hw registers */ struct ad1889_register_state { u16 reg; /* reg setup */ u32 addr; /* dma base address */ unsigned long size; /* DMA buffer size */ }; struct snd_ad1889 { struct snd_card *card; struct pci_dev *pci; int irq; unsigned long bar; void __iomem *iobase; struct snd_ac97 *ac97; struct snd_ac97_bus *ac97_bus; struct snd_pcm *pcm; struct snd_info_entry *proc; struct snd_pcm_substream *psubs; struct snd_pcm_substream *csubs; /* playback register state */ struct ad1889_register_state wave; struct ad1889_register_state ramc; spinlock_t lock; }; static inline u16 ad1889_readw(struct snd_ad1889 *chip, unsigned reg) { return readw(chip->iobase + reg); } static inline void ad1889_writew(struct snd_ad1889 *chip, unsigned reg, u16 val) { writew(val, chip->iobase + reg); } static inline u32 ad1889_readl(struct snd_ad1889 *chip, unsigned reg) { return readl(chip->iobase + reg); } static inline void ad1889_writel(struct snd_ad1889 *chip, unsigned reg, u32 val) { writel(val, chip->iobase + reg); } static inline void ad1889_unmute(struct snd_ad1889 *chip) { u16 st; st = ad1889_readw(chip, AD_DS_WADA) & ~(AD_DS_WADA_RWAM | AD_DS_WADA_LWAM); ad1889_writew(chip, AD_DS_WADA, st); ad1889_readw(chip, AD_DS_WADA); } static inline void ad1889_mute(struct snd_ad1889 *chip) { u16 st; st = ad1889_readw(chip, AD_DS_WADA) | AD_DS_WADA_RWAM | AD_DS_WADA_LWAM; ad1889_writew(chip, AD_DS_WADA, st); ad1889_readw(chip, AD_DS_WADA); } static inline void ad1889_load_adc_buffer_address(struct snd_ad1889 *chip, u32 address) { ad1889_writel(chip, AD_DMA_ADCBA, address); ad1889_writel(chip, AD_DMA_ADCCA, address); } static inline void ad1889_load_adc_buffer_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_ADCBC, count); ad1889_writel(chip, AD_DMA_ADCCC, count); } static inline void ad1889_load_adc_interrupt_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_ADCIB, count); ad1889_writel(chip, 
AD_DMA_ADCIC, count); } static inline void ad1889_load_wave_buffer_address(struct snd_ad1889 *chip, u32 address) { ad1889_writel(chip, AD_DMA_WAVBA, address); ad1889_writel(chip, AD_DMA_WAVCA, address); } static inline void ad1889_load_wave_buffer_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_WAVBC, count); ad1889_writel(chip, AD_DMA_WAVCC, count); } static inline void ad1889_load_wave_interrupt_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_WAVIB, count); ad1889_writel(chip, AD_DMA_WAVIC, count); } static void ad1889_channel_reset(struct snd_ad1889 *chip, unsigned int channel) { u16 reg; if (channel & AD_CHAN_WAV) { /* Disable wave channel */ reg = ad1889_readw(chip, AD_DS_WSMC) & ~AD_DS_WSMC_WAEN; ad1889_writew(chip, AD_DS_WSMC, reg); chip->wave.reg = reg; /* disable IRQs */ reg = ad1889_readw(chip, AD_DMA_WAV); reg &= AD_DMA_IM_DIS; reg &= ~AD_DMA_LOOP; ad1889_writew(chip, AD_DMA_WAV, reg); /* clear IRQ and address counters and pointers */ ad1889_load_wave_buffer_address(chip, 0x0); ad1889_load_wave_buffer_count(chip, 0x0); ad1889_load_wave_interrupt_count(chip, 0x0); /* flush */ ad1889_readw(chip, AD_DMA_WAV); } if (channel & AD_CHAN_ADC) { /* Disable ADC channel */ reg = ad1889_readw(chip, AD_DS_RAMC) & ~AD_DS_RAMC_ADEN; ad1889_writew(chip, AD_DS_RAMC, reg); chip->ramc.reg = reg; reg = ad1889_readw(chip, AD_DMA_ADC); reg &= AD_DMA_IM_DIS; reg &= ~AD_DMA_LOOP; ad1889_writew(chip, AD_DMA_ADC, reg); ad1889_load_adc_buffer_address(chip, 0x0); ad1889_load_adc_buffer_count(chip, 0x0); ad1889_load_adc_interrupt_count(chip, 0x0); /* flush */ ad1889_readw(chip, AD_DMA_ADC); } } static u16 snd_ad1889_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_ad1889 *chip = ac97->private_data; return ad1889_readw(chip, AD_AC97_BASE + reg); } static void snd_ad1889_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_ad1889 *chip = ac97->private_data; ad1889_writew(chip, AD_AC97_BASE 
+ reg, val); } static int snd_ad1889_ac97_ready(struct snd_ad1889 *chip) { int retry = 400; /* average needs 352 msec */ while (!(ad1889_readw(chip, AD_AC97_ACIC) & AD_AC97_ACIC_ACRDY) && --retry) mdelay(1); if (!retry) { snd_printk(KERN_ERR PFX "[%s] Link is not ready.\n", __func__); return -EIO; } ad1889_debug("[%s] ready after %d ms\n", __func__, 400 - retry); return 0; } static int snd_ad1889_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_ad1889_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static struct snd_pcm_hardware snd_ad1889_playback_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, /* docs say 7000, but we're lazy */ .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = PERIOD_BYTES_MAX, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, /*.fifo_size = 0,*/ }; static struct snd_pcm_hardware snd_ad1889_capture_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, /* docs say we could to VSR, but we're lazy */ .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = PERIOD_BYTES_MAX, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, /*.fifo_size = 0,*/ }; static int snd_ad1889_playback_open(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; chip->psubs = ss; 
rt->hw = snd_ad1889_playback_hw; return 0; } static int snd_ad1889_capture_open(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; chip->csubs = ss; rt->hw = snd_ad1889_capture_hw; return 0; } static int snd_ad1889_playback_close(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); chip->psubs = NULL; return 0; } static int snd_ad1889_capture_close(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); chip->csubs = NULL; return 0; } static int snd_ad1889_playback_prepare(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; unsigned int size = snd_pcm_lib_buffer_bytes(ss); unsigned int count = snd_pcm_lib_period_bytes(ss); u16 reg; ad1889_channel_reset(chip, AD_CHAN_WAV); reg = ad1889_readw(chip, AD_DS_WSMC); /* Mask out 16-bit / Stereo */ reg &= ~(AD_DS_WSMC_WA16 | AD_DS_WSMC_WAST); if (snd_pcm_format_width(rt->format) == 16) reg |= AD_DS_WSMC_WA16; if (rt->channels > 1) reg |= AD_DS_WSMC_WAST; /* let's make sure we don't clobber ourselves */ spin_lock_irq(&chip->lock); chip->wave.size = size; chip->wave.reg = reg; chip->wave.addr = rt->dma_addr; ad1889_writew(chip, AD_DS_WSMC, chip->wave.reg); /* Set sample rates on the codec */ ad1889_writew(chip, AD_DS_WAS, rt->rate); /* Set up DMA */ ad1889_load_wave_buffer_address(chip, chip->wave.addr); ad1889_load_wave_buffer_count(chip, size); ad1889_load_wave_interrupt_count(chip, count); /* writes flush */ ad1889_readw(chip, AD_DS_WSMC); spin_unlock_irq(&chip->lock); ad1889_debug("prepare playback: addr = 0x%x, count = %u, " "size = %u, reg = 0x%x, rate = %u\n", chip->wave.addr, count, size, reg, rt->rate); return 0; } static int snd_ad1889_capture_prepare(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; unsigned int size = 
snd_pcm_lib_buffer_bytes(ss); unsigned int count = snd_pcm_lib_period_bytes(ss); u16 reg; ad1889_channel_reset(chip, AD_CHAN_ADC); reg = ad1889_readw(chip, AD_DS_RAMC); /* Mask out 16-bit / Stereo */ reg &= ~(AD_DS_RAMC_AD16 | AD_DS_RAMC_ADST); if (snd_pcm_format_width(rt->format) == 16) reg |= AD_DS_RAMC_AD16; if (rt->channels > 1) reg |= AD_DS_RAMC_ADST; /* let's make sure we don't clobber ourselves */ spin_lock_irq(&chip->lock); chip->ramc.size = size; chip->ramc.reg = reg; chip->ramc.addr = rt->dma_addr; ad1889_writew(chip, AD_DS_RAMC, chip->ramc.reg); /* Set up DMA */ ad1889_load_adc_buffer_address(chip, chip->ramc.addr); ad1889_load_adc_buffer_count(chip, size); ad1889_load_adc_interrupt_count(chip, count); /* writes flush */ ad1889_readw(chip, AD_DS_RAMC); spin_unlock_irq(&chip->lock); ad1889_debug("prepare capture: addr = 0x%x, count = %u, " "size = %u, reg = 0x%x, rate = %u\n", chip->ramc.addr, count, size, reg, rt->rate); return 0; } /* this is called in atomic context with IRQ disabled. Must be as fast as possible and not sleep. DMA should be *triggered* by this call. 
The WSMC "WAEN" bit triggers DMA Wave On/Off */ static int snd_ad1889_playback_trigger(struct snd_pcm_substream *ss, int cmd) { u16 wsmc; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); wsmc = ad1889_readw(chip, AD_DS_WSMC); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* enable DMA loop & interrupts */ ad1889_writew(chip, AD_DMA_WAV, AD_DMA_LOOP | AD_DMA_IM_CNT); wsmc |= AD_DS_WSMC_WAEN; /* 1 to clear CHSS bit */ ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_WAVS); ad1889_unmute(chip); break; case SNDRV_PCM_TRIGGER_STOP: ad1889_mute(chip); wsmc &= ~AD_DS_WSMC_WAEN; break; default: snd_BUG(); return -EINVAL; } chip->wave.reg = wsmc; ad1889_writew(chip, AD_DS_WSMC, wsmc); ad1889_readw(chip, AD_DS_WSMC); /* flush */ /* reset the chip when STOP - will disable IRQs */ if (cmd == SNDRV_PCM_TRIGGER_STOP) ad1889_channel_reset(chip, AD_CHAN_WAV); return 0; } /* this is called in atomic context with IRQ disabled. Must be as fast as possible and not sleep. DMA should be *triggered* by this call. 
The RAMC "ADEN" bit triggers DMA ADC On/Off */ static int snd_ad1889_capture_trigger(struct snd_pcm_substream *ss, int cmd) { u16 ramc; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); ramc = ad1889_readw(chip, AD_DS_RAMC); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* enable DMA loop & interrupts */ ad1889_writew(chip, AD_DMA_ADC, AD_DMA_LOOP | AD_DMA_IM_CNT); ramc |= AD_DS_RAMC_ADEN; /* 1 to clear CHSS bit */ ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_ADCS); break; case SNDRV_PCM_TRIGGER_STOP: ramc &= ~AD_DS_RAMC_ADEN; break; default: return -EINVAL; } chip->ramc.reg = ramc; ad1889_writew(chip, AD_DS_RAMC, ramc); ad1889_readw(chip, AD_DS_RAMC); /* flush */ /* reset the chip when STOP - will disable IRQs */ if (cmd == SNDRV_PCM_TRIGGER_STOP) ad1889_channel_reset(chip, AD_CHAN_ADC); return 0; } /* Called in atomic context with IRQ disabled */ static snd_pcm_uframes_t snd_ad1889_playback_pointer(struct snd_pcm_substream *ss) { size_t ptr = 0; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); if (unlikely(!(chip->wave.reg & AD_DS_WSMC_WAEN))) return 0; ptr = ad1889_readl(chip, AD_DMA_WAVCA); ptr -= chip->wave.addr; if (snd_BUG_ON(ptr >= chip->wave.size)) return 0; return bytes_to_frames(ss->runtime, ptr); } /* Called in atomic context with IRQ disabled */ static snd_pcm_uframes_t snd_ad1889_capture_pointer(struct snd_pcm_substream *ss) { size_t ptr = 0; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); if (unlikely(!(chip->ramc.reg & AD_DS_RAMC_ADEN))) return 0; ptr = ad1889_readl(chip, AD_DMA_ADCCA); ptr -= chip->ramc.addr; if (snd_BUG_ON(ptr >= chip->ramc.size)) return 0; return bytes_to_frames(ss->runtime, ptr); } static struct snd_pcm_ops snd_ad1889_playback_ops = { .open = snd_ad1889_playback_open, .close = snd_ad1889_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ad1889_hw_params, .hw_free = snd_ad1889_hw_free, .prepare = snd_ad1889_playback_prepare, .trigger = snd_ad1889_playback_trigger, .pointer = snd_ad1889_playback_pointer, 
}; static struct snd_pcm_ops snd_ad1889_capture_ops = { .open = snd_ad1889_capture_open, .close = snd_ad1889_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ad1889_hw_params, .hw_free = snd_ad1889_hw_free, .prepare = snd_ad1889_capture_prepare, .trigger = snd_ad1889_capture_trigger, .pointer = snd_ad1889_capture_pointer, }; static irqreturn_t snd_ad1889_interrupt(int irq, void *dev_id) { unsigned long st; struct snd_ad1889 *chip = dev_id; st = ad1889_readl(chip, AD_DMA_DISR); /* clear ISR */ ad1889_writel(chip, AD_DMA_DISR, st); st &= AD_INTR_MASK; if (unlikely(!st)) return IRQ_NONE; if (st & (AD_DMA_DISR_PMAI|AD_DMA_DISR_PTAI)) ad1889_debug("Unexpected master or target abort interrupt!\n"); if ((st & AD_DMA_DISR_WAVI) && chip->psubs) snd_pcm_period_elapsed(chip->psubs); if ((st & AD_DMA_DISR_ADCI) && chip->csubs) snd_pcm_period_elapsed(chip->csubs); return IRQ_HANDLED; } static int __devinit snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm) { int err; struct snd_pcm *pcm; if (rpcm) *rpcm = NULL; err = snd_pcm_new(chip->card, chip->card->driver, device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ad1889_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ad1889_capture_ops); pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; chip->psubs = NULL; chip->csubs = NULL; err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), BUFFER_BYTES_MAX / 2, BUFFER_BYTES_MAX); if (err < 0) { snd_printk(KERN_ERR PFX "buffer allocation error: %d\n", err); return err; } if (rpcm) *rpcm = pcm; return 0; } static void snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ad1889 *chip = entry->private_data; u16 reg; int tmp; reg = ad1889_readw(chip, AD_DS_WSMC); snd_iprintf(buffer, "Wave output: %s\n", (reg & AD_DS_WSMC_WAEN) ? 
"enabled" : "disabled"); snd_iprintf(buffer, "Wave Channels: %s\n", (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); snd_iprintf(buffer, "Wave Quality: %d-bit linear\n", (reg & AD_DS_WSMC_WA16) ? 16 : 8); /* WARQ is at offset 12 */ tmp = (reg & AD_DS_WSMC_WARQ) ? (((reg & AD_DS_WSMC_WARQ >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); snd_iprintf(buffer, "Synthesis output: %s\n", reg & AD_DS_WSMC_SYEN ? "enabled" : "disabled"); /* SYRQ is at offset 4 */ tmp = (reg & AD_DS_WSMC_SYRQ) ? (((reg & AD_DS_WSMC_SYRQ >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); reg = ad1889_readw(chip, AD_DS_RAMC); snd_iprintf(buffer, "ADC input: %s\n", (reg & AD_DS_RAMC_ADEN) ? "enabled" : "disabled"); snd_iprintf(buffer, "ADC Channels: %s\n", (reg & AD_DS_RAMC_ADST) ? "stereo" : "mono"); snd_iprintf(buffer, "ADC Quality: %d-bit linear\n", (reg & AD_DS_RAMC_AD16) ? 16 : 8); /* ACRQ is at offset 4 */ tmp = (reg & AD_DS_RAMC_ACRQ) ? (((reg & AD_DS_RAMC_ACRQ >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp, (reg & AD_DS_RAMC_ADST) ? "stereo" : "mono"); snd_iprintf(buffer, "Resampler input: %s\n", reg & AD_DS_RAMC_REEN ? "enabled" : "disabled"); /* RERQ is at offset 12 */ tmp = (reg & AD_DS_RAMC_RERQ) ? (((reg & AD_DS_RAMC_RERQ >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); /* doc says LSB represents -1.5dB, but the max value (-94.5dB) suggests that LSB is -3dB, which is more coherent with the logarithmic nature of the dB scale */ reg = ad1889_readw(chip, AD_DS_WADA); snd_iprintf(buffer, "Left: %s, -%d dB\n", (reg & AD_DS_WADA_LWAM) ? 
"mute" : "unmute", ((reg & AD_DS_WADA_LWAA) >> 8) * 3); reg = ad1889_readw(chip, AD_DS_WADA); snd_iprintf(buffer, "Right: %s, -%d dB\n", (reg & AD_DS_WADA_RWAM) ? "mute" : "unmute", ((reg & AD_DS_WADA_RWAA) >> 8) * 3); reg = ad1889_readw(chip, AD_DS_WAS); snd_iprintf(buffer, "Wave samplerate: %u Hz\n", reg); reg = ad1889_readw(chip, AD_DS_RES); snd_iprintf(buffer, "Resampler samplerate: %u Hz\n", reg); } static void __devinit snd_ad1889_proc_init(struct snd_ad1889 *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, chip->card->driver, &entry)) snd_info_set_text_ops(entry, chip, snd_ad1889_proc_read); } static struct ac97_quirk ac97_quirks[] = { { .subvendor = 0x11d4, /* AD */ .subdevice = 0x1889, /* AD1889 */ .codec_id = AC97_ID_AD1819, .name = "AD1889", .type = AC97_TUNE_HP_ONLY }, { } /* terminator */ }; static void __devinit snd_ad1889_ac97_xinit(struct snd_ad1889 *chip) { u16 reg; reg = ad1889_readw(chip, AD_AC97_ACIC); reg |= AD_AC97_ACIC_ACRD; /* Reset Disable */ ad1889_writew(chip, AD_AC97_ACIC, reg); ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */ udelay(10); /* Interface Enable */ reg |= AD_AC97_ACIC_ACIE; ad1889_writew(chip, AD_AC97_ACIC, reg); snd_ad1889_ac97_ready(chip); /* Audio Stream Output | Variable Sample Rate Mode */ reg = ad1889_readw(chip, AD_AC97_ACIC); reg |= AD_AC97_ACIC_ASOE | AD_AC97_ACIC_VSRM; ad1889_writew(chip, AD_AC97_ACIC, reg); ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */ } static void snd_ad1889_ac97_bus_free(struct snd_ac97_bus *bus) { struct snd_ad1889 *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_ad1889_ac97_free(struct snd_ac97 *ac97) { struct snd_ad1889 *chip = ac97->private_data; chip->ac97 = NULL; } static int __devinit snd_ad1889_ac97_init(struct snd_ad1889 *chip, const char *quirk_override) { int err; struct snd_ac97_template ac97; static struct snd_ac97_bus_ops ops = { .write = snd_ad1889_ac97_write, .read = snd_ad1889_ac97_read, }; /* doing that here, it 
works. */ snd_ad1889_ac97_xinit(chip); err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus); if (err < 0) return err; chip->ac97_bus->private_free = snd_ad1889_ac97_bus_free; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_ad1889_ac97_free; ac97.pci = chip->pci; err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97); if (err < 0) return err; snd_ac97_tune_hardware(chip->ac97, ac97_quirks, quirk_override); return 0; } static int snd_ad1889_free(struct snd_ad1889 *chip) { if (chip->irq < 0) goto skip_hw; spin_lock_irq(&chip->lock); ad1889_mute(chip); /* Turn off interrupt on count and zero DMA registers */ ad1889_channel_reset(chip, AD_CHAN_WAV | AD_CHAN_ADC); /* clear DISR. If we don't, we'd better jump off the Eiffel Tower */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PTAI | AD_DMA_DISR_PMAI); ad1889_readl(chip, AD_DMA_DISR); /* flush, dammit! */ spin_unlock_irq(&chip->lock); if (chip->irq >= 0) free_irq(chip->irq, chip); skip_hw: if (chip->iobase) iounmap(chip->iobase); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_ad1889_dev_free(struct snd_device *device) { struct snd_ad1889 *chip = device->device_data; return snd_ad1889_free(chip); } static int __devinit snd_ad1889_init(struct snd_ad1889 *chip) { ad1889_writew(chip, AD_DS_CCS, AD_DS_CCS_CLKEN); /* turn on clock */ ad1889_readw(chip, AD_DS_CCS); /* flush posted write */ mdelay(10); /* enable Master and Target abort interrupts */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PMAE | AD_DMA_DISR_PTAE); return 0; } static int __devinit snd_ad1889_create(struct snd_card *card, struct pci_dev *pci, struct snd_ad1889 **rchip) { int err; struct snd_ad1889 *chip; static struct snd_device_ops ops = { .dev_free = snd_ad1889_dev_free, }; *rchip = NULL; if ((err = pci_enable_device(pci)) < 0) return err; /* check PCI availability (32bit DMA) */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 || 
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) { printk(KERN_ERR PFX "error setting 32-bit DMA mask.\n"); pci_disable_device(pci); return -ENXIO; } /* allocate chip specific data with zero-filled memory */ if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; card->private_data = chip; chip->pci = pci; chip->irq = -1; /* (1) PCI resource allocation */ if ((err = pci_request_regions(pci, card->driver)) < 0) goto free_and_ret; chip->bar = pci_resource_start(pci, 0); chip->iobase = pci_ioremap_bar(pci, 0); if (chip->iobase == NULL) { printk(KERN_ERR PFX "unable to reserve region.\n"); err = -EBUSY; goto free_and_ret; } pci_set_master(pci); spin_lock_init(&chip->lock); /* only now can we call ad1889_free */ if (request_irq(pci->irq, snd_ad1889_interrupt, IRQF_SHARED, card->driver, chip)) { printk(KERN_ERR PFX "cannot obtain IRQ %d\n", pci->irq); snd_ad1889_free(chip); return -EBUSY; } chip->irq = pci->irq; synchronize_irq(chip->irq); /* (2) initialization of the chip hardware */ if ((err = snd_ad1889_init(chip)) < 0) { snd_ad1889_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_ad1889_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; free_and_ret: kfree(chip); pci_disable_device(pci); return err; } static int __devinit snd_ad1889_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { int err; static int devno; struct snd_card *card; struct snd_ad1889 *chip; /* (1) */ if (devno >= SNDRV_CARDS) return -ENODEV; if (!enable[devno]) { devno++; return -ENOENT; } /* (2) */ err = snd_card_create(index[devno], id[devno], THIS_MODULE, 0, &card); /* XXX REVISIT: we can probably allocate chip in this call */ if (err < 0) return err; strcpy(card->driver, "AD1889"); strcpy(card->shortname, "Analog Devices AD1889"); /* (3) */ err = snd_ad1889_create(card, pci, &chip); if (err < 0) goto free_and_ret; /* (4) */ 
sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->bar, chip->irq); /* (5) */ /* register AC97 mixer */ err = snd_ad1889_ac97_init(chip, ac97_quirk[devno]); if (err < 0) goto free_and_ret; err = snd_ad1889_pcm_init(chip, 0, NULL); if (err < 0) goto free_and_ret; /* register proc interface */ snd_ad1889_proc_init(chip); /* (6) */ err = snd_card_register(card); if (err < 0) goto free_and_ret; /* (7) */ pci_set_drvdata(pci, card); devno++; return 0; free_and_ret: snd_card_free(card); return err; } static void __devexit snd_ad1889_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static DEFINE_PCI_DEVICE_TABLE(snd_ad1889_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, snd_ad1889_ids); static struct pci_driver ad1889_pci_driver = { .name = "AD1889 Audio", .id_table = snd_ad1889_ids, .probe = snd_ad1889_probe, .remove = __devexit_p(snd_ad1889_remove), }; static int __init alsa_ad1889_init(void) { return pci_register_driver(&ad1889_pci_driver); } static void __exit alsa_ad1889_fini(void) { pci_unregister_driver(&ad1889_pci_driver); } module_init(alsa_ad1889_init); module_exit(alsa_ad1889_fini);
gpl-2.0
CyanogenMod/android_kernel_samsung_t1
drivers/hwmon/pmbus_core.c
1583
40982
/* * Hardware monitoring driver for PMBus devices * * Copyright (c) 2010, 2011 Ericsson AB. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/delay.h> #include <linux/i2c/pmbus.h> #include "pmbus.h" /* * Constants needed to determine number of sensors, booleans, and labels. 
*/ #define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */ #define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit, crit */ #define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */ #define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */ #define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */ #define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit, crit */ #define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm, lcrit_alarm, crit_alarm; c: alarm, crit_alarm; p: crit_alarm */ #define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm, lcrit_alarm, crit_alarm */ #define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm, crit_alarm */ #define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm */ #define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */ #define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm, lcrit_alarm, crit_alarm */ #define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */ /* * status, status_vout, status_iout, status_fans, status_fan34, and status_temp * are paged. status_input is unpaged. */ #define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) /* * Index into status register array, per status register group */ #define PB_STATUS_BASE 0 #define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES) #define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES) #define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES) #define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES) #define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) #define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1) struct pmbus_sensor { char name[I2C_NAME_SIZE]; /* sysfs sensor name */ struct sensor_device_attribute attribute; u8 page; /* page number */ u8 reg; /* register */ enum pmbus_sensor_classes class; /* sensor class */ bool update; /* runtime sensor update needed */ int data; /* Sensor data. 
Negative if there was a read error */ }; struct pmbus_boolean { char name[I2C_NAME_SIZE]; /* sysfs boolean name */ struct sensor_device_attribute attribute; }; struct pmbus_label { char name[I2C_NAME_SIZE]; /* sysfs label name */ struct sensor_device_attribute attribute; char label[I2C_NAME_SIZE]; /* label */ }; struct pmbus_data { struct device *hwmon_dev; u32 flags; /* from platform data */ int exponent; /* linear mode: exponent for output voltages */ const struct pmbus_driver_info *info; int max_attributes; int num_attributes; struct attribute **attributes; struct attribute_group group; /* * Sensors cover both sensor and limit registers. */ int max_sensors; int num_sensors; struct pmbus_sensor *sensors; /* * Booleans are used for alarms. * Values are determined from status registers. */ int max_booleans; int num_booleans; struct pmbus_boolean *booleans; /* * Labels are used to map generic names (e.g., "in1") * to PMBus specific names (e.g., "vin" or "vout1"). */ int max_labels; int num_labels; struct pmbus_label *labels; struct mutex update_lock; bool valid; unsigned long last_updated; /* in jiffies */ /* * A single status register covers multiple attributes, * so we keep them all together. 
*/ u8 status[PB_NUM_STATUS_REG]; u8 currpage; }; int pmbus_set_page(struct i2c_client *client, u8 page) { struct pmbus_data *data = i2c_get_clientdata(client); int rv = 0; int newpage; if (page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); if (newpage != page) rv = -EINVAL; else data->currpage = page; } return rv; } EXPORT_SYMBOL_GPL(pmbus_set_page); static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_write_byte(client, value); } static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_write_word_data(client, reg, word); } int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_read_word_data(client, reg); } EXPORT_SYMBOL_GPL(pmbus_read_word_data); static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_read_byte_data(client, reg); } static void pmbus_clear_fault_page(struct i2c_client *client, int page) { pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); } void pmbus_clear_faults(struct i2c_client *client) { struct pmbus_data *data = i2c_get_clientdata(client); int i; for (i = 0; i < data->info->pages; i++) pmbus_clear_fault_page(client, i); } EXPORT_SYMBOL_GPL(pmbus_clear_faults); static int pmbus_check_status_cml(struct i2c_client *client, int page) { int status, status2; status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE); if (status < 0 || (status & PB_STATUS_CML)) { status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML); if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND)) return -EINVAL; } return 0; } bool 
pmbus_check_byte_register(struct i2c_client *client, int page, int reg) { int rv; struct pmbus_data *data = i2c_get_clientdata(client); rv = pmbus_read_byte_data(client, page, reg); if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) rv = pmbus_check_status_cml(client, page); pmbus_clear_fault_page(client, page); return rv >= 0; } EXPORT_SYMBOL_GPL(pmbus_check_byte_register); bool pmbus_check_word_register(struct i2c_client *client, int page, int reg) { int rv; struct pmbus_data *data = i2c_get_clientdata(client); rv = pmbus_read_word_data(client, page, reg); if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) rv = pmbus_check_status_cml(client, page); pmbus_clear_fault_page(client, page); return rv >= 0; } EXPORT_SYMBOL_GPL(pmbus_check_word_register); const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client) { struct pmbus_data *data = i2c_get_clientdata(client); return data->info; } EXPORT_SYMBOL_GPL(pmbus_get_driver_info); /* * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if * a device specific mapping funcion exists and calls it if necessary. 
*/ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg) { struct pmbus_data *data = i2c_get_clientdata(client); const struct pmbus_driver_info *info = data->info; int status; if (info->read_byte_data) { status = info->read_byte_data(client, page, reg); if (status != -ENODATA) return status; } return pmbus_read_byte_data(client, page, reg); } static struct pmbus_data *pmbus_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct pmbus_data *data = i2c_get_clientdata(client); const struct pmbus_driver_info *info = data->info; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { int i; for (i = 0; i < info->pages; i++) data->status[PB_STATUS_BASE + i] = pmbus_read_byte_data(client, i, PMBUS_STATUS_BYTE); for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT)) continue; data->status[PB_STATUS_VOUT_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_VOUT); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT)) continue; data->status[PB_STATUS_IOUT_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_IOUT); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP)) continue; data->status[PB_STATUS_TEMP_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_TEMPERATURE); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12)) continue; data->status[PB_STATUS_FAN_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_FAN_12); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34)) continue; data->status[PB_STATUS_FAN34_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_FAN_34); } if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) data->status[PB_STATUS_INPUT_BASE] = _pmbus_read_byte_data(client, 0, PMBUS_STATUS_INPUT); for (i = 0; i < data->num_sensors; i++) { struct pmbus_sensor *sensor = &data->sensors[i]; 
if (!data->valid || sensor->update) sensor->data = pmbus_read_word_data(client, sensor->page, sensor->reg); } pmbus_clear_faults(client); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* * Convert linear sensor values to milli- or micro-units * depending on sensor type. */ static long pmbus_reg2data_linear(struct pmbus_data *data, struct pmbus_sensor *sensor) { s16 exponent; s32 mantissa; long val; if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */ exponent = data->exponent; mantissa = (u16) sensor->data; } else { /* LINEAR11 */ exponent = (sensor->data >> 11) & 0x001f; mantissa = sensor->data & 0x07ff; if (exponent > 0x0f) exponent |= 0xffe0; /* sign extend exponent */ if (mantissa > 0x03ff) mantissa |= 0xfffff800; /* sign extend mantissa */ } val = mantissa; /* scale result to milli-units for all sensors except fans */ if (sensor->class != PSC_FAN) val = val * 1000L; /* scale result to micro-units for power sensors */ if (sensor->class == PSC_POWER) val = val * 1000L; if (exponent >= 0) val <<= exponent; else val >>= -exponent; return val; } /* * Convert direct sensor values to milli- or micro-units * depending on sensor type. 
*/ static long pmbus_reg2data_direct(struct pmbus_data *data, struct pmbus_sensor *sensor) { long val = (s16) sensor->data; long m, b, R; m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; R = data->info->R[sensor->class]; if (m == 0) return 0; /* X = 1/m * (Y * 10^-R - b) */ R = -R; /* scale result to milli-units for everything but fans */ if (sensor->class != PSC_FAN) { R += 3; b *= 1000; } /* scale result to micro-units for power sensors */ if (sensor->class == PSC_POWER) { R += 3; b *= 1000; } while (R > 0) { val *= 10; R--; } while (R < 0) { val = DIV_ROUND_CLOSEST(val, 10); R++; } return (val - b) / m; } static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor) { long val; if (data->info->direct[sensor->class]) val = pmbus_reg2data_direct(data, sensor); else val = pmbus_reg2data_linear(data, sensor); return val; } #define MAX_MANTISSA (1023 * 1000) #define MIN_MANTISSA (511 * 1000) static u16 pmbus_data2reg_linear(struct pmbus_data *data, enum pmbus_sensor_classes class, long val) { s16 exponent = 0, mantissa; bool negative = false; /* simple case */ if (val == 0) return 0; if (class == PSC_VOLTAGE_OUT) { /* LINEAR16 does not support negative voltages */ if (val < 0) return 0; /* * For a static exponents, we don't have a choice * but to adjust the value to it. */ if (data->exponent < 0) val <<= -data->exponent; else val >>= data->exponent; val = DIV_ROUND_CLOSEST(val, 1000); return val & 0xffff; } if (val < 0) { negative = true; val = -val; } /* Power is in uW. Convert to mW before converting. */ if (class == PSC_POWER) val = DIV_ROUND_CLOSEST(val, 1000L); /* * For simplicity, convert fan data to milli-units * before calculating the exponent. 
*/ if (class == PSC_FAN) val = val * 1000; /* Reduce large mantissa until it fits into 10 bit */ while (val >= MAX_MANTISSA && exponent < 15) { exponent++; val >>= 1; } /* Increase small mantissa to improve precision */ while (val < MIN_MANTISSA && exponent > -15) { exponent--; val <<= 1; } /* Convert mantissa from milli-units to units */ mantissa = DIV_ROUND_CLOSEST(val, 1000); /* Ensure that resulting number is within range */ if (mantissa > 0x3ff) mantissa = 0x3ff; /* restore sign */ if (negative) mantissa = -mantissa; /* Convert to 5 bit exponent, 11 bit mantissa */ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800); } static u16 pmbus_data2reg_direct(struct pmbus_data *data, enum pmbus_sensor_classes class, long val) { long m, b, R; m = data->info->m[class]; b = data->info->b[class]; R = data->info->R[class]; /* Power is in uW. Adjust R and b. */ if (class == PSC_POWER) { R -= 3; b *= 1000; } /* Calculate Y = (m * X + b) * 10^R */ if (class != PSC_FAN) { R -= 3; /* Adjust R and b for data in milli-units */ b *= 1000; } val = val * m + b; while (R > 0) { val *= 10; R--; } while (R < 0) { val = DIV_ROUND_CLOSEST(val, 10); R++; } return val; } static u16 pmbus_data2reg(struct pmbus_data *data, enum pmbus_sensor_classes class, long val) { u16 regval; if (data->info->direct[class]) regval = pmbus_data2reg_direct(data, class, val); else regval = pmbus_data2reg_linear(data, class, val); return regval; } /* * Return boolean calculated from converted data. * <index> defines a status register index and mask, and optionally * two sensor indexes. * The upper half-word references the two sensors, * two sensor indices. * The upper half-word references the two optional sensors, * the lower half word references status register and mask. * The function returns true if (status[reg] & mask) is true and, * if specified, if v1 >= v2. * To determine if an object exceeds upper limits, specify <v, limit>. * To determine if an object exceeds lower limits, specify <limit, v>. 
* * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of * index are set. s1 and s2 (the sensor index values) are zero in this case. * The function returns true if (status[reg] & mask) is true. * * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against * a specified limit has to be performed to determine the boolean result. * In this case, the function returns true if v1 >= v2 (where v1 and v2 are * sensor values referenced by sensor indices s1 and s2). * * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>. * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>. * * If a negative value is stored in any of the referenced registers, this value * reflects an error code which will be returned. */ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val) { u8 s1 = (index >> 24) & 0xff; u8 s2 = (index >> 16) & 0xff; u8 reg = (index >> 8) & 0xff; u8 mask = index & 0xff; int status; u8 regval; status = data->status[reg]; if (status < 0) return status; regval = status & mask; if (!s1 && !s2) *val = !!regval; else { long v1, v2; struct pmbus_sensor *sensor1, *sensor2; sensor1 = &data->sensors[s1]; if (sensor1->data < 0) return sensor1->data; sensor2 = &data->sensors[s2]; if (sensor2->data < 0) return sensor2->data; v1 = pmbus_reg2data(data, sensor1); v2 = pmbus_reg2data(data, sensor2); *val = !!(regval && v1 >= v2); } return 0; } static ssize_t pmbus_show_boolean(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct pmbus_data *data = pmbus_update_device(dev); int val; int err; err = pmbus_get_boolean(data, attr->index, &val); if (err) return err; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t pmbus_show_sensor(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct pmbus_data *data = 
pmbus_update_device(dev); struct pmbus_sensor *sensor; sensor = &data->sensors[attr->index]; if (sensor->data < 0) return sensor->data; return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor)); } static ssize_t pmbus_set_sensor(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct pmbus_data *data = i2c_get_clientdata(client); struct pmbus_sensor *sensor = &data->sensors[attr->index]; ssize_t rv = count; long val = 0; int ret; u16 regval; if (strict_strtol(buf, 10, &val) < 0) return -EINVAL; mutex_lock(&data->update_lock); regval = pmbus_data2reg(data, sensor->class, val); ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval); if (ret < 0) rv = ret; else data->sensors[attr->index].data = regval; mutex_unlock(&data->update_lock); return rv; } static ssize_t pmbus_show_label(struct device *dev, struct device_attribute *da, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct pmbus_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); return snprintf(buf, PAGE_SIZE, "%s\n", data->labels[attr->index].label); } #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \ do { \ struct sensor_device_attribute *a \ = &data->_type##s[data->num_##_type##s].attribute; \ BUG_ON(data->num_attributes >= data->max_attributes); \ sysfs_attr_init(&a->dev_attr.attr); \ a->dev_attr.attr.name = _name; \ a->dev_attr.attr.mode = _mode; \ a->dev_attr.show = _show; \ a->dev_attr.store = _set; \ a->index = _idx; \ data->attributes[data->num_attributes] = &a->dev_attr.attr; \ data->num_attributes++; \ } while (0) #define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \ PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \ pmbus_show_##_type, NULL) #define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \ PMBUS_ADD_ATTR(data, _name, _idx, 
S_IWUSR | S_IRUGO, _type, \ pmbus_show_##_type, pmbus_set_##_type) static void pmbus_add_boolean(struct pmbus_data *data, const char *name, const char *type, int seq, int idx) { struct pmbus_boolean *boolean; BUG_ON(data->num_booleans >= data->max_booleans); boolean = &data->booleans[data->num_booleans]; snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s", name, seq, type); PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx); data->num_booleans++; } static void pmbus_add_boolean_reg(struct pmbus_data *data, const char *name, const char *type, int seq, int reg, int bit) { pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit); } static void pmbus_add_boolean_cmp(struct pmbus_data *data, const char *name, const char *type, int seq, int i1, int i2, int reg, int mask) { pmbus_add_boolean(data, name, type, seq, (i1 << 24) | (i2 << 16) | (reg << 8) | mask); } static void pmbus_add_sensor(struct pmbus_data *data, const char *name, const char *type, int seq, int page, int reg, enum pmbus_sensor_classes class, bool update, bool readonly) { struct pmbus_sensor *sensor; BUG_ON(data->num_sensors >= data->max_sensors); sensor = &data->sensors[data->num_sensors]; snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s", name, seq, type); sensor->page = page; sensor->reg = reg; sensor->class = class; sensor->update = update; if (readonly) PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, data->num_sensors); else PMBUS_ADD_SET_ATTR(data, sensor->name, sensor, data->num_sensors); data->num_sensors++; } static void pmbus_add_label(struct pmbus_data *data, const char *name, int seq, const char *lstring, int index) { struct pmbus_label *label; BUG_ON(data->num_labels >= data->max_labels); label = &data->labels[data->num_labels]; snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq); if (!index) strncpy(label->label, lstring, sizeof(label->label) - 1); else snprintf(label->label, sizeof(label->label), "%s%d", lstring, index); PMBUS_ADD_GET_ATTR(data, label->name, 
label, data->num_labels); data->num_labels++; } /* * Determine maximum number of sensors, booleans, and labels. * To keep things simple, only make a rough high estimate. */ static void pmbus_find_max_attr(struct i2c_client *client, struct pmbus_data *data) { const struct pmbus_driver_info *info = data->info; int page, max_sensors, max_booleans, max_labels; max_sensors = PMBUS_MAX_INPUT_SENSORS; max_booleans = PMBUS_MAX_INPUT_BOOLEANS; max_labels = PMBUS_MAX_INPUT_LABELS; for (page = 0; page < info->pages; page++) { if (info->func[page] & PMBUS_HAVE_VOUT) { max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE; max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE; max_labels++; } if (info->func[page] & PMBUS_HAVE_IOUT) { max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE; max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE; max_labels++; } if (info->func[page] & PMBUS_HAVE_POUT) { max_sensors += PMBUS_POUT_SENSORS_PER_PAGE; max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE; max_labels++; } if (info->func[page] & PMBUS_HAVE_FAN12) { max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN; max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN; } if (info->func[page] & PMBUS_HAVE_FAN34) { max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN; max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN; } if (info->func[page] & PMBUS_HAVE_TEMP) { max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; } if (info->func[page] & PMBUS_HAVE_TEMP2) { max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; } if (info->func[page] & PMBUS_HAVE_TEMP3) { max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; } } data->max_sensors = max_sensors; data->max_booleans = max_booleans; data->max_labels = max_labels; data->max_attributes = max_sensors + max_booleans + max_labels; } /* * Search for attributes. Allocate sensors, booleans, and labels as needed. */ /* * The pmbus_limit_attr structure describes a single limit attribute * and its associated alarm attribute. 
*/ struct pmbus_limit_attr { u8 reg; /* Limit register */ const char *attr; /* Attribute name */ const char *alarm; /* Alarm attribute name */ u32 sbit; /* Alarm attribute status bit */ }; /* * The pmbus_sensor_attr structure describes one sensor attribute. This * description includes a reference to the associated limit attributes. */ struct pmbus_sensor_attr { u8 reg; /* sensor register */ enum pmbus_sensor_classes class;/* sensor class */ const char *label; /* sensor label */ bool paged; /* true if paged sensor */ bool update; /* true if update needed */ bool compare; /* true if compare function needed */ u32 func; /* sensor mask */ u32 sfunc; /* sensor status mask */ int sbase; /* status base register */ u32 gbit; /* generic status bit */ const struct pmbus_limit_attr *limit;/* limit registers */ int nlimit; /* # of limit registers */ }; /* * Add a set of limit attributes and, if supported, the associated * alarm attributes. */ static bool pmbus_add_limit_attrs(struct i2c_client *client, struct pmbus_data *data, const struct pmbus_driver_info *info, const char *name, int index, int page, int cbase, const struct pmbus_sensor_attr *attr) { const struct pmbus_limit_attr *l = attr->limit; int nlimit = attr->nlimit; bool have_alarm = false; int i, cindex; for (i = 0; i < nlimit; i++) { if (pmbus_check_word_register(client, page, l->reg)) { cindex = data->num_sensors; pmbus_add_sensor(data, name, l->attr, index, page, l->reg, attr->class, attr->update, false); if (info->func[page] & attr->sfunc) { if (attr->compare) { pmbus_add_boolean_cmp(data, name, l->alarm, index, cbase, cindex, attr->sbase + page, l->sbit); } else { pmbus_add_boolean_reg(data, name, l->alarm, index, attr->sbase + page, l->sbit); } have_alarm = true; } } l++; } return have_alarm; } static void pmbus_add_sensor_attrs_one(struct i2c_client *client, struct pmbus_data *data, const struct pmbus_driver_info *info, const char *name, int index, int page, const struct pmbus_sensor_attr *attr) { bool 
have_alarm; int cbase = data->num_sensors; if (attr->label) pmbus_add_label(data, name, index, attr->label, attr->paged ? page + 1 : 0); pmbus_add_sensor(data, name, "input", index, page, attr->reg, attr->class, true, true); if (attr->sfunc) { have_alarm = pmbus_add_limit_attrs(client, data, info, name, index, page, cbase, attr); /* * Add generic alarm attribute only if there are no individual * alarm attributes, and if there is a global alarm bit. */ if (!have_alarm && attr->gbit) pmbus_add_boolean_reg(data, name, "alarm", index, PB_STATUS_BASE + page, attr->gbit); } } static void pmbus_add_sensor_attrs(struct i2c_client *client, struct pmbus_data *data, const char *name, const struct pmbus_sensor_attr *attrs, int nattrs) { const struct pmbus_driver_info *info = data->info; int index, i; index = 1; for (i = 0; i < nattrs; i++) { int page, pages; pages = attrs->paged ? info->pages : 1; for (page = 0; page < pages; page++) { if (!(info->func[page] & attrs->func)) continue; pmbus_add_sensor_attrs_one(client, data, info, name, index, page, attrs); index++; } attrs++; } } static const struct pmbus_limit_attr vin_limit_attrs[] = { { .reg = PMBUS_VIN_UV_WARN_LIMIT, .attr = "min", .alarm = "min_alarm", .sbit = PB_VOLTAGE_UV_WARNING, }, { .reg = PMBUS_VIN_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_VOLTAGE_UV_FAULT, }, { .reg = PMBUS_VIN_OV_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_VOLTAGE_OV_WARNING, }, { .reg = PMBUS_VIN_OV_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_VOLTAGE_OV_FAULT, }, }; static const struct pmbus_limit_attr vout_limit_attrs[] = { { .reg = PMBUS_VOUT_UV_WARN_LIMIT, .attr = "min", .alarm = "min_alarm", .sbit = PB_VOLTAGE_UV_WARNING, }, { .reg = PMBUS_VOUT_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_VOLTAGE_UV_FAULT, }, { .reg = PMBUS_VOUT_OV_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_VOLTAGE_OV_WARNING, }, { .reg = PMBUS_VOUT_OV_FAULT_LIMIT, .attr = 
"crit", .alarm = "crit_alarm", .sbit = PB_VOLTAGE_OV_FAULT, } }; static const struct pmbus_sensor_attr voltage_attributes[] = { { .reg = PMBUS_READ_VIN, .class = PSC_VOLTAGE_IN, .label = "vin", .func = PMBUS_HAVE_VIN, .sfunc = PMBUS_HAVE_STATUS_INPUT, .sbase = PB_STATUS_INPUT_BASE, .gbit = PB_STATUS_VIN_UV, .limit = vin_limit_attrs, .nlimit = ARRAY_SIZE(vin_limit_attrs), }, { .reg = PMBUS_READ_VCAP, .class = PSC_VOLTAGE_IN, .label = "vcap", .func = PMBUS_HAVE_VCAP, }, { .reg = PMBUS_READ_VOUT, .class = PSC_VOLTAGE_OUT, .label = "vout", .paged = true, .func = PMBUS_HAVE_VOUT, .sfunc = PMBUS_HAVE_STATUS_VOUT, .sbase = PB_STATUS_VOUT_BASE, .gbit = PB_STATUS_VOUT_OV, .limit = vout_limit_attrs, .nlimit = ARRAY_SIZE(vout_limit_attrs), } }; /* Current attributes */ static const struct pmbus_limit_attr iin_limit_attrs[] = { { .reg = PMBUS_IIN_OC_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_IIN_OC_WARNING, }, { .reg = PMBUS_IIN_OC_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_IIN_OC_FAULT, } }; static const struct pmbus_limit_attr iout_limit_attrs[] = { { .reg = PMBUS_IOUT_OC_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_IOUT_OC_WARNING, }, { .reg = PMBUS_IOUT_UC_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_IOUT_UC_FAULT, }, { .reg = PMBUS_IOUT_OC_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_IOUT_OC_FAULT, } }; static const struct pmbus_sensor_attr current_attributes[] = { { .reg = PMBUS_READ_IIN, .class = PSC_CURRENT_IN, .label = "iin", .func = PMBUS_HAVE_IIN, .sfunc = PMBUS_HAVE_STATUS_INPUT, .sbase = PB_STATUS_INPUT_BASE, .limit = iin_limit_attrs, .nlimit = ARRAY_SIZE(iin_limit_attrs), }, { .reg = PMBUS_READ_IOUT, .class = PSC_CURRENT_OUT, .label = "iout", .paged = true, .func = PMBUS_HAVE_IOUT, .sfunc = PMBUS_HAVE_STATUS_IOUT, .sbase = PB_STATUS_IOUT_BASE, .gbit = PB_STATUS_IOUT_OC, .limit = iout_limit_attrs, .nlimit = ARRAY_SIZE(iout_limit_attrs), } }; /* Power attributes */ static 
const struct pmbus_limit_attr pin_limit_attrs[] = { { .reg = PMBUS_PIN_OP_WARN_LIMIT, .attr = "max", .alarm = "alarm", .sbit = PB_PIN_OP_WARNING, } }; static const struct pmbus_limit_attr pout_limit_attrs[] = { { .reg = PMBUS_POUT_MAX, .attr = "cap", .alarm = "cap_alarm", .sbit = PB_POWER_LIMITING, }, { .reg = PMBUS_POUT_OP_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_POUT_OP_WARNING, }, { .reg = PMBUS_POUT_OP_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_POUT_OP_FAULT, } }; static const struct pmbus_sensor_attr power_attributes[] = { { .reg = PMBUS_READ_PIN, .class = PSC_POWER, .label = "pin", .func = PMBUS_HAVE_PIN, .sfunc = PMBUS_HAVE_STATUS_INPUT, .sbase = PB_STATUS_INPUT_BASE, .limit = pin_limit_attrs, .nlimit = ARRAY_SIZE(pin_limit_attrs), }, { .reg = PMBUS_READ_POUT, .class = PSC_POWER, .label = "pout", .paged = true, .func = PMBUS_HAVE_POUT, .sfunc = PMBUS_HAVE_STATUS_IOUT, .sbase = PB_STATUS_IOUT_BASE, .limit = pout_limit_attrs, .nlimit = ARRAY_SIZE(pout_limit_attrs), } }; /* Temperature atributes */ static const struct pmbus_limit_attr temp_limit_attrs[] = { { .reg = PMBUS_UT_WARN_LIMIT, .attr = "min", .alarm = "min_alarm", .sbit = PB_TEMP_UT_WARNING, }, { .reg = PMBUS_UT_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_TEMP_UT_FAULT, }, { .reg = PMBUS_OT_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_TEMP_OT_WARNING, }, { .reg = PMBUS_OT_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_TEMP_OT_FAULT, } }; static const struct pmbus_sensor_attr temp_attributes[] = { { .reg = PMBUS_READ_TEMPERATURE_1, .class = PSC_TEMPERATURE, .paged = true, .update = true, .compare = true, .func = PMBUS_HAVE_TEMP, .sfunc = PMBUS_HAVE_STATUS_TEMP, .sbase = PB_STATUS_TEMP_BASE, .gbit = PB_STATUS_TEMPERATURE, .limit = temp_limit_attrs, .nlimit = ARRAY_SIZE(temp_limit_attrs), }, { .reg = PMBUS_READ_TEMPERATURE_2, .class = PSC_TEMPERATURE, .paged = true, .update = true, .compare = true, .func = 
PMBUS_HAVE_TEMP2, .sfunc = PMBUS_HAVE_STATUS_TEMP, .sbase = PB_STATUS_TEMP_BASE, .gbit = PB_STATUS_TEMPERATURE, .limit = temp_limit_attrs, .nlimit = ARRAY_SIZE(temp_limit_attrs), }, { .reg = PMBUS_READ_TEMPERATURE_3, .class = PSC_TEMPERATURE, .paged = true, .update = true, .compare = true, .func = PMBUS_HAVE_TEMP3, .sfunc = PMBUS_HAVE_STATUS_TEMP, .sbase = PB_STATUS_TEMP_BASE, .gbit = PB_STATUS_TEMPERATURE, .limit = temp_limit_attrs, .nlimit = ARRAY_SIZE(temp_limit_attrs), } }; static const int pmbus_fan_registers[] = { PMBUS_READ_FAN_SPEED_1, PMBUS_READ_FAN_SPEED_2, PMBUS_READ_FAN_SPEED_3, PMBUS_READ_FAN_SPEED_4 }; static const int pmbus_fan_config_registers[] = { PMBUS_FAN_CONFIG_12, PMBUS_FAN_CONFIG_12, PMBUS_FAN_CONFIG_34, PMBUS_FAN_CONFIG_34 }; static const int pmbus_fan_status_registers[] = { PMBUS_STATUS_FAN_12, PMBUS_STATUS_FAN_12, PMBUS_STATUS_FAN_34, PMBUS_STATUS_FAN_34 }; static const u32 pmbus_fan_flags[] = { PMBUS_HAVE_FAN12, PMBUS_HAVE_FAN12, PMBUS_HAVE_FAN34, PMBUS_HAVE_FAN34 }; static const u32 pmbus_fan_status_flags[] = { PMBUS_HAVE_STATUS_FAN12, PMBUS_HAVE_STATUS_FAN12, PMBUS_HAVE_STATUS_FAN34, PMBUS_HAVE_STATUS_FAN34 }; /* Fans */ static void pmbus_add_fan_attributes(struct i2c_client *client, struct pmbus_data *data) { const struct pmbus_driver_info *info = data->info; int index = 1; int page; for (page = 0; page < info->pages; page++) { int f; for (f = 0; f < ARRAY_SIZE(pmbus_fan_registers); f++) { int regval; if (!(info->func[page] & pmbus_fan_flags[f])) break; if (!pmbus_check_word_register(client, page, pmbus_fan_registers[f])) break; /* * Skip fan if not installed. * Each fan configuration register covers multiple fans, * so we have to do some magic. 
*/ regval = _pmbus_read_byte_data(client, page, pmbus_fan_config_registers[f]); if (regval < 0 || (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4))))) continue; pmbus_add_sensor(data, "fan", "input", index, page, pmbus_fan_registers[f], PSC_FAN, true, true); /* * Each fan status register covers multiple fans, * so we have to do some magic. */ if ((info->func[page] & pmbus_fan_status_flags[f]) && pmbus_check_byte_register(client, page, pmbus_fan_status_registers[f])) { int base; if (f > 1) /* fan 3, 4 */ base = PB_STATUS_FAN34_BASE + page; else base = PB_STATUS_FAN_BASE + page; pmbus_add_boolean_reg(data, "fan", "alarm", index, base, PB_FAN_FAN1_WARNING >> (f & 1)); pmbus_add_boolean_reg(data, "fan", "fault", index, base, PB_FAN_FAN1_FAULT >> (f & 1)); } index++; } } } static void pmbus_find_attributes(struct i2c_client *client, struct pmbus_data *data) { /* Voltage sensors */ pmbus_add_sensor_attrs(client, data, "in", voltage_attributes, ARRAY_SIZE(voltage_attributes)); /* Current sensors */ pmbus_add_sensor_attrs(client, data, "curr", current_attributes, ARRAY_SIZE(current_attributes)); /* Power sensors */ pmbus_add_sensor_attrs(client, data, "power", power_attributes, ARRAY_SIZE(power_attributes)); /* Temperature sensors */ pmbus_add_sensor_attrs(client, data, "temp", temp_attributes, ARRAY_SIZE(temp_attributes)); /* Fans */ pmbus_add_fan_attributes(client, data); } /* * Identify chip parameters. * This function is called for all chips. */ static int pmbus_identify_common(struct i2c_client *client, struct pmbus_data *data) { int vout_mode = -1, exponent; if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { /* * Not all chips support the VOUT_MODE command, * so a failure to read it is not an error. 
*/ switch (vout_mode >> 5) { case 0: /* linear mode */ if (data->info->direct[PSC_VOLTAGE_OUT]) return -ENODEV; exponent = vout_mode & 0x1f; /* and sign-extend it */ if (exponent & 0x10) exponent |= ~0x1f; data->exponent = exponent; break; case 2: /* direct mode */ if (!data->info->direct[PSC_VOLTAGE_OUT]) return -ENODEV; break; default: return -ENODEV; } } /* Determine maximum number of sensors, booleans, and labels */ pmbus_find_max_attr(client, data); pmbus_clear_fault_page(client, 0); return 0; } int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, struct pmbus_driver_info *info) { const struct pmbus_platform_data *pdata = client->dev.platform_data; struct pmbus_data *data; int ret; if (!info) { dev_err(&client->dev, "Missing chip information"); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(&client->dev, "No memory to allocate driver data\n"); return -ENOMEM; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Bail out if PMBus status register does not exist. */ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) { dev_err(&client->dev, "PMBus status register not found\n"); ret = -ENODEV; goto out_data; } if (pdata) data->flags = pdata->flags; data->info = info; pmbus_clear_faults(client); if (info->identify) { ret = (*info->identify)(client, info); if (ret < 0) { dev_err(&client->dev, "Chip identification failed\n"); goto out_data; } } if (info->pages <= 0 || info->pages > PMBUS_PAGES) { dev_err(&client->dev, "Bad number of PMBus pages: %d\n", info->pages); ret = -EINVAL; goto out_data; } /* * Bail out if more than one page was configured, but we can not * select the highest page. This is an indication that the wrong * chip type was selected. Better bail out now than keep * returning errors later on. 
*/ if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) { dev_err(&client->dev, "Failed to select page %d\n", info->pages - 1); ret = -EINVAL; goto out_data; } ret = pmbus_identify_common(client, data); if (ret < 0) { dev_err(&client->dev, "Failed to identify chip capabilities\n"); goto out_data; } ret = -ENOMEM; data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors, GFP_KERNEL); if (!data->sensors) { dev_err(&client->dev, "No memory to allocate sensor data\n"); goto out_data; } data->booleans = kzalloc(sizeof(struct pmbus_boolean) * data->max_booleans, GFP_KERNEL); if (!data->booleans) { dev_err(&client->dev, "No memory to allocate boolean data\n"); goto out_sensors; } data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels, GFP_KERNEL); if (!data->labels) { dev_err(&client->dev, "No memory to allocate label data\n"); goto out_booleans; } data->attributes = kzalloc(sizeof(struct attribute *) * data->max_attributes, GFP_KERNEL); if (!data->attributes) { dev_err(&client->dev, "No memory to allocate attribute data\n"); goto out_labels; } pmbus_find_attributes(client, data); /* * If there are no attributes, something is wrong. * Bail out instead of trying to register nothing. 
*/ if (!data->num_attributes) { dev_err(&client->dev, "No attributes found\n"); ret = -ENODEV; goto out_attributes; } /* Register sysfs hooks */ data->group.attrs = data->attributes; ret = sysfs_create_group(&client->dev.kobj, &data->group); if (ret) { dev_err(&client->dev, "Failed to create sysfs entries\n"); goto out_attributes; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "Failed to register hwmon device\n"); goto out_hwmon_device_register; } return 0; out_hwmon_device_register: sysfs_remove_group(&client->dev.kobj, &data->group); out_attributes: kfree(data->attributes); out_labels: kfree(data->labels); out_booleans: kfree(data->booleans); out_sensors: kfree(data->sensors); out_data: kfree(data); return ret; } EXPORT_SYMBOL_GPL(pmbus_do_probe); int pmbus_do_remove(struct i2c_client *client) { struct pmbus_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &data->group); kfree(data->attributes); kfree(data->labels); kfree(data->booleans); kfree(data->sensors); kfree(data); return 0; } EXPORT_SYMBOL_GPL(pmbus_do_remove); MODULE_AUTHOR("Guenter Roeck"); MODULE_DESCRIPTION("PMBus core driver"); MODULE_LICENSE("GPL");
gpl-2.0
openwrt/bcm63xx-next
drivers/net/wireless/prism54/isl_38xx.c
1839
8047
/* * Copyright (C) 2002 Intersil Americas Inc. * Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <asm/io.h> #include "prismcompat.h" #include "isl_38xx.h" #include "islpci_dev.h" #include "islpci_mgt.h" /****************************************************************************** Device Interface & Control functions ******************************************************************************/ /** * isl38xx_disable_interrupts - disable all interrupts * @device: pci memory base address * * Instructs the device to disable all interrupt reporting by asserting * the IRQ line. New events may still show up in the interrupt identification * register located at offset %ISL38XX_INT_IDENT_REG. 
*/ void isl38xx_disable_interrupts(void __iomem *device) { isl38xx_w32_flush(device, 0x00000000, ISL38XX_INT_EN_REG); udelay(ISL38XX_WRITEIO_DELAY); } void isl38xx_handle_sleep_request(isl38xx_control_block *control_block, int *powerstate, void __iomem *device_base) { /* device requests to go into sleep mode * check whether the transmit queues for data and management are empty */ if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ)) /* data tx queue not empty */ return; if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ)) /* management tx queue not empty */ return; /* check also whether received frames are pending */ if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_DATA_LQ)) /* data rx queue not empty */ return; if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_MGMTQ)) /* management rx queue not empty */ return; #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "Device going to sleep mode\n"); #endif /* all queues are empty, allow the device to go into sleep mode */ *powerstate = ISL38XX_PSM_POWERSAVE_STATE; /* assert the Sleep interrupt in the Device Interrupt Register */ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_SLEEP, ISL38XX_DEV_INT_REG); udelay(ISL38XX_WRITEIO_DELAY); } void isl38xx_handle_wakeup(isl38xx_control_block *control_block, int *powerstate, void __iomem *device_base) { /* device is in active state, update the powerstate flag */ *powerstate = ISL38XX_PSM_ACTIVE_STATE; /* now check whether there are frames pending for the card */ if (!isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ) && !isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ)) return; #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_ANYTHING, "Wake up handler trigger the device\n"); #endif /* either data or management transmit queue has a frame pending * trigger the device by setting the Update bit in the Device Int reg */ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE, ISL38XX_DEV_INT_REG); udelay(ISL38XX_WRITEIO_DELAY); } void isl38xx_trigger_device(int 
asleep, void __iomem *device_base) { u32 reg; #if VERBOSE > SHOW_ERROR_MESSAGES u32 counter = 0; struct timeval current_time; DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n"); #endif /* check whether the device is in power save mode */ if (asleep) { /* device is in powersave, trigger the device for wakeup */ #if VERBOSE > SHOW_ERROR_MESSAGES do_gettimeofday(&current_time); DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n", current_time.tv_sec, (long)current_time.tv_usec); DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n", current_time.tv_sec, (long)current_time.tv_usec, readl(device_base + ISL38XX_CTRL_STAT_REG)); #endif reg = readl(device_base + ISL38XX_INT_IDENT_REG); if (reg == 0xabadface) { #if VERBOSE > SHOW_ERROR_MESSAGES do_gettimeofday(&current_time); DEBUG(SHOW_TRACING, "%08li.%08li Device register abadface\n", current_time.tv_sec, (long)current_time.tv_usec); #endif /* read the Device Status Register until Sleepmode bit is set */ while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG), (reg & ISL38XX_CTRL_STAT_SLEEPMODE) == 0) { udelay(ISL38XX_WRITEIO_DELAY); #if VERBOSE > SHOW_ERROR_MESSAGES counter++; #endif } #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n", current_time.tv_sec, (long)current_time.tv_usec, readl(device_base + ISL38XX_CTRL_STAT_REG)); do_gettimeofday(&current_time); DEBUG(SHOW_TRACING, "%08li.%08li Device asleep counter %i\n", current_time.tv_sec, (long)current_time.tv_usec, counter); #endif } /* assert the Wakeup interrupt in the Device Interrupt Register */ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_WAKEUP, ISL38XX_DEV_INT_REG); #if VERBOSE > SHOW_ERROR_MESSAGES udelay(ISL38XX_WRITEIO_DELAY); /* perform another read on the Device Status Register */ reg = readl(device_base + ISL38XX_CTRL_STAT_REG); do_gettimeofday(&current_time); DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n", current_time.tv_sec, (long)current_time.tv_usec, reg); #endif } 
else { /* device is (still) awake */ #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "Device is in active state\n"); #endif /* trigger the device by setting the Update bit in the Device Int reg */ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE, ISL38XX_DEV_INT_REG); } } void isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address) { #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_FUNCTION_CALLS, "isl38xx_interface_reset\n"); #endif /* load the address of the control block in the device */ isl38xx_w32_flush(device_base, host_address, ISL38XX_CTRL_BLK_BASE_REG); udelay(ISL38XX_WRITEIO_DELAY); /* set the reset bit in the Device Interrupt Register */ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_RESET, ISL38XX_DEV_INT_REG); udelay(ISL38XX_WRITEIO_DELAY); /* enable the interrupt for detecting initialization */ /* Note: Do not enable other interrupts here. We want the * device to have come up first 100% before allowing any other * interrupts. */ isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG); udelay(ISL38XX_WRITEIO_DELAY); /* allow complete full reset */ } void isl38xx_enable_common_interrupts(void __iomem *device_base) { u32 reg; reg = ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP | ISL38XX_INT_IDENT_WAKEUP; isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG); udelay(ISL38XX_WRITEIO_DELAY); } int isl38xx_in_queue(isl38xx_control_block *cb, int queue) { const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) - le32_to_cpu(cb->device_curr_frag[queue])); /* determine the amount of fragments in the queue depending on the type * of the queue, either transmit or receive */ BUG_ON(delta < 0); /* driver ptr must be ahead of device ptr */ switch (queue) { /* send queues */ case ISL38XX_CB_TX_MGMTQ: BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); case ISL38XX_CB_TX_DATA_LQ: case ISL38XX_CB_TX_DATA_HQ: BUG_ON(delta > ISL38XX_CB_TX_QSIZE); return delta; /* receive queues */ case ISL38XX_CB_RX_MGMTQ: BUG_ON(delta > 
ISL38XX_CB_MGMT_QSIZE); return ISL38XX_CB_MGMT_QSIZE - delta; case ISL38XX_CB_RX_DATA_LQ: case ISL38XX_CB_RX_DATA_HQ: BUG_ON(delta > ISL38XX_CB_RX_QSIZE); return ISL38XX_CB_RX_QSIZE - delta; } BUG(); return 0; }
gpl-2.0
mifl/android_kernel_pantech_oscar
arch/arm/mach-pxa/corgi_pm.c
4911
6361
/* * Battery and Power Management code for the Sharp SL-C7xx * * Copyright (c) 2005 Richard Purdie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/apm-emulation.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/corgi.h> #include <mach/pxa2xx-regs.h> #include <mach/sharpsl_pm.h> #include "generic.h" #define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */ #define SHARPSL_CHARGE_ON_TEMP 0xe0 /* 2.9V */ #define SHARPSL_CHARGE_ON_ACIN_HIGH 0x9b /* 6V */ #define SHARPSL_CHARGE_ON_ACIN_LOW 0x34 /* 2V */ #define SHARPSL_FATAL_ACIN_VOLT 182 /* 3.45V */ #define SHARPSL_FATAL_NOACIN_VOLT 170 /* 3.40V */ static struct gpio charger_gpios[] = { { CORGI_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" }, { CORGI_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" }, { CORGI_GPIO_CHRG_UKN, GPIOF_OUT_INIT_LOW, "Charger Unknown" }, { CORGI_GPIO_AC_IN, GPIOF_IN, "Charger Detection" }, { CORGI_GPIO_KEY_INT, GPIOF_IN, "Key Interrupt" }, { CORGI_GPIO_WAKEUP, GPIOF_IN, "System wakeup notification" }, }; static void corgi_charger_init(void) { gpio_request_array(ARRAY_AND_SIZE(charger_gpios)); } static void corgi_measure_temp(int on) { gpio_set_value(CORGI_GPIO_ADC_TEMP_ON, on); } static void corgi_charge(int on) { if (on) { if (machine_is_corgi() && (sharpsl_pm.flags & SHARPSL_SUSPENDED)) { gpio_set_value(CORGI_GPIO_CHRG_ON, 0); gpio_set_value(CORGI_GPIO_CHRG_UKN, 1); } else { gpio_set_value(CORGI_GPIO_CHRG_ON, 1); gpio_set_value(CORGI_GPIO_CHRG_UKN, 0); } } else { gpio_set_value(CORGI_GPIO_CHRG_ON, 0); gpio_set_value(CORGI_GPIO_CHRG_UKN, 0); 
} } static void corgi_discharge(int on) { gpio_set_value(CORGI_GPIO_DISCHARGE_ON, on); } static void corgi_presuspend(void) { } static void corgi_postsuspend(void) { } /* * Check what brought us out of the suspend. * Return: 0 to sleep, otherwise wake */ static int corgi_should_wakeup(unsigned int resume_on_alarm) { int is_resume = 0; dev_dbg(sharpsl_pm.dev, "PEDR = %x, GPIO_AC_IN = %d, " "GPIO_CHRG_FULL = %d, GPIO_KEY_INT = %d, GPIO_WAKEUP = %d\n", PEDR, gpio_get_value(CORGI_GPIO_AC_IN), gpio_get_value(CORGI_GPIO_CHRG_FULL), gpio_get_value(CORGI_GPIO_KEY_INT), gpio_get_value(CORGI_GPIO_WAKEUP)); if ((PEDR & GPIO_bit(CORGI_GPIO_AC_IN))) { if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) { /* charge on */ dev_dbg(sharpsl_pm.dev, "ac insert\n"); sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG; } else { /* charge off */ dev_dbg(sharpsl_pm.dev, "ac remove\n"); sharpsl_pm_led(SHARPSL_LED_OFF); sharpsl_pm.machinfo->charge(0); sharpsl_pm.charge_mode = CHRG_OFF; } } if ((PEDR & GPIO_bit(CORGI_GPIO_CHRG_FULL))) dev_dbg(sharpsl_pm.dev, "Charge full interrupt\n"); if (PEDR & GPIO_bit(CORGI_GPIO_KEY_INT)) is_resume |= GPIO_bit(CORGI_GPIO_KEY_INT); if (PEDR & GPIO_bit(CORGI_GPIO_WAKEUP)) is_resume |= GPIO_bit(CORGI_GPIO_WAKEUP); if (resume_on_alarm && (PEDR & PWER_RTC)) is_resume |= PWER_RTC; dev_dbg(sharpsl_pm.dev, "is_resume: %x\n",is_resume); return is_resume; } static unsigned long corgi_charger_wakeup(void) { unsigned long ret; ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN)) | (!gpio_get_value(CORGI_GPIO_KEY_INT) << GPIO_bit(CORGI_GPIO_KEY_INT)) | (!gpio_get_value(CORGI_GPIO_WAKEUP) << GPIO_bit(CORGI_GPIO_WAKEUP)); return ret; } unsigned long corgipm_read_devdata(int type) { switch(type) { case SHARPSL_STATUS_ACIN: return !gpio_get_value(CORGI_GPIO_AC_IN); case SHARPSL_STATUS_LOCK: return gpio_get_value(sharpsl_pm.machinfo->gpio_batlock); case SHARPSL_STATUS_CHRGFULL: return gpio_get_value(sharpsl_pm.machinfo->gpio_batfull); case 
SHARPSL_STATUS_FATAL: return gpio_get_value(sharpsl_pm.machinfo->gpio_fatal); case SHARPSL_ACIN_VOLT: return sharpsl_pm_pxa_read_max1111(MAX1111_ACIN_VOLT); case SHARPSL_BATT_TEMP: return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_TEMP); case SHARPSL_BATT_VOLT: default: return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_VOLT); } } static struct sharpsl_charger_machinfo corgi_pm_machinfo = { .init = corgi_charger_init, .exit = NULL, .gpio_batlock = CORGI_GPIO_BAT_COVER, .gpio_acin = CORGI_GPIO_AC_IN, .gpio_batfull = CORGI_GPIO_CHRG_FULL, .discharge = corgi_discharge, .charge = corgi_charge, .measure_temp = corgi_measure_temp, .presuspend = corgi_presuspend, .postsuspend = corgi_postsuspend, .read_devdata = corgipm_read_devdata, .charger_wakeup = corgi_charger_wakeup, .should_wakeup = corgi_should_wakeup, #if defined(CONFIG_LCD_CORGI) .backlight_limit = corgi_lcd_limit_intensity, #endif .charge_on_volt = SHARPSL_CHARGE_ON_VOLT, .charge_on_temp = SHARPSL_CHARGE_ON_TEMP, .charge_acin_high = SHARPSL_CHARGE_ON_ACIN_HIGH, .charge_acin_low = SHARPSL_CHARGE_ON_ACIN_LOW, .fatal_acin_volt = SHARPSL_FATAL_ACIN_VOLT, .fatal_noacin_volt= SHARPSL_FATAL_NOACIN_VOLT, .bat_levels = 40, .bat_levels_noac = sharpsl_battery_levels_noac, .bat_levels_acin = sharpsl_battery_levels_acin, .status_high_acin = 188, .status_low_acin = 178, .status_high_noac = 185, .status_low_noac = 175, }; static struct platform_device *corgipm_device; static int __devinit corgipm_init(void) { int ret; if (!machine_is_corgi() && !machine_is_shepherd() && !machine_is_husky()) return -ENODEV; corgipm_device = platform_device_alloc("sharpsl-pm", -1); if (!corgipm_device) return -ENOMEM; if (!machine_is_corgi()) corgi_pm_machinfo.batfull_irq = 1; corgipm_device->dev.platform_data = &corgi_pm_machinfo; ret = platform_device_add(corgipm_device); if (ret) platform_device_put(corgipm_device); return ret; } static void corgipm_exit(void) { platform_device_unregister(corgipm_device); } module_init(corgipm_init); 
module_exit(corgipm_exit);
gpl-2.0
DroidHost/android_kernel_LGE_x5
block/bsg.c
4911
24290
/* * bsg.c - block layer implementation of the sg v4 interface * * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com> * * This file is subject to the terms and conditions of the GNU General Public * License version 2. See the file "COPYING" in the main directory of this * archive for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/poll.h> #include <linux/cdev.h> #include <linux/jiffies.h> #include <linux/percpu.h> #include <linux/uio.h> #include <linux/idr.h> #include <linux/bsg.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/sg.h> #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" #define BSG_VERSION "0.4" struct bsg_device { struct request_queue *queue; spinlock_t lock; struct list_head busy_list; struct list_head done_list; struct hlist_node dev_list; atomic_t ref_count; int queued_cmds; int done_cmds; wait_queue_head_t wq_done; wait_queue_head_t wq_free; char name[20]; int max_queue; unsigned long flags; }; enum { BSG_F_BLOCK = 1, }; #define BSG_DEFAULT_CMDS 64 #define BSG_MAX_DEVS 32768 #undef BSG_DEBUG #ifdef BSG_DEBUG #define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args) #else #define dprintk(fmt, args...) 
#endif static DEFINE_MUTEX(bsg_mutex); static DEFINE_IDR(bsg_minor_idr); #define BSG_LIST_ARRAY_SIZE 8 static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; static struct class *bsg_class; static int bsg_major; static struct kmem_cache *bsg_cmd_cachep; /* * our internal command type */ struct bsg_command { struct bsg_device *bd; struct list_head list; struct request *rq; struct bio *bio; struct bio *bidi_bio; int err; struct sg_io_v4 hdr; char sense[SCSI_SENSE_BUFFERSIZE]; }; static void bsg_free_command(struct bsg_command *bc) { struct bsg_device *bd = bc->bd; unsigned long flags; kmem_cache_free(bsg_cmd_cachep, bc); spin_lock_irqsave(&bd->lock, flags); bd->queued_cmds--; spin_unlock_irqrestore(&bd->lock, flags); wake_up(&bd->wq_free); } static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) { struct bsg_command *bc = ERR_PTR(-EINVAL); spin_lock_irq(&bd->lock); if (bd->queued_cmds >= bd->max_queue) goto out; bd->queued_cmds++; spin_unlock_irq(&bd->lock); bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); if (unlikely(!bc)) { spin_lock_irq(&bd->lock); bd->queued_cmds--; bc = ERR_PTR(-ENOMEM); goto out; } bc->bd = bd; INIT_LIST_HEAD(&bc->list); dprintk("%s: returning free cmd %p\n", bd->name, bc); return bc; out: spin_unlock_irq(&bd->lock); return bc; } static inline struct hlist_head *bsg_dev_idx_hash(int index) { return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; } static int bsg_io_schedule(struct bsg_device *bd) { DEFINE_WAIT(wait); int ret = 0; spin_lock_irq(&bd->lock); BUG_ON(bd->done_cmds > bd->queued_cmds); /* * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no * work to do", even though we return -ENOSPC after this same test * during bsg_write() -- there, it means our buffer can't have more * bsg_commands added to it, thus has no space left. 
*/ if (bd->done_cmds == bd->queued_cmds) { ret = -ENODATA; goto unlock; } if (!test_bit(BSG_F_BLOCK, &bd->flags)) { ret = -EAGAIN; goto unlock; } prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bd->lock); io_schedule(); finish_wait(&bd->wq_done, &wait); return ret; unlock: spin_unlock_irq(&bd->lock); return ret; } static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, struct sg_io_v4 *hdr, struct bsg_device *bd, fmode_t has_write_perm) { if (hdr->request_len > BLK_MAX_CDB) { rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); if (!rq->cmd) return -ENOMEM; } if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, hdr->request_len)) return -EFAULT; if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { if (blk_verify_command(rq->cmd, has_write_perm)) return -EPERM; } else if (!capable(CAP_SYS_RAWIO)) return -EPERM; /* * fill in request structure */ rq->cmd_len = hdr->request_len; rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->timeout = msecs_to_jiffies(hdr->timeout); if (!rq->timeout) rq->timeout = q->sg_timeout; if (!rq->timeout) rq->timeout = BLK_DEFAULT_SG_TIMEOUT; if (rq->timeout < BLK_MIN_SG_TIMEOUT) rq->timeout = BLK_MIN_SG_TIMEOUT; return 0; } /* * Check if sg_io_v4 from user is allowed and valid */ static int bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) { int ret = 0; if (hdr->guard != 'Q') return -EINVAL; switch (hdr->protocol) { case BSG_PROTOCOL_SCSI: switch (hdr->subprotocol) { case BSG_SUB_PROTOCOL_SCSI_CMD: case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: break; default: ret = -EINVAL; } break; default: ret = -EINVAL; } *rw = hdr->dout_xfer_len ? WRITE : READ; return ret; } /* * map sg_io_v4 to a request. 
*/ static struct request * bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, u8 *sense) { struct request_queue *q = bd->queue; struct request *rq, *next_rq = NULL; int ret, rw; unsigned int dxfer_len; void __user *dxferp = NULL; struct bsg_class_device *bcd = &q->bsg_dev; /* if the LLD has been removed then the bsg_unregister_queue will * eventually be called and the class_dev was freed, so we can no * longer use this request_queue. Return no such address. */ if (!bcd->class_dev) return ERR_PTR(-ENXIO); dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, hdr->din_xfer_len); ret = bsg_validate_sgv4_hdr(q, hdr, &rw); if (ret) return ERR_PTR(ret); /* * map scatter-gather elements separately and string them to request */ rq = blk_get_request(q, rw, GFP_KERNEL); if (!rq) return ERR_PTR(-ENOMEM); ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); if (ret) goto out; if (rw == WRITE && hdr->din_xfer_len) { if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { ret = -EOPNOTSUPP; goto out; } next_rq = blk_get_request(q, READ, GFP_KERNEL); if (!next_rq) { ret = -ENOMEM; goto out; } rq->next_rq = next_rq; next_rq->cmd_type = rq->cmd_type; dxferp = (void __user *)(unsigned long)hdr->din_xferp; ret = blk_rq_map_user(q, next_rq, NULL, dxferp, hdr->din_xfer_len, GFP_KERNEL); if (ret) goto out; } if (hdr->dout_xfer_len) { dxfer_len = hdr->dout_xfer_len; dxferp = (void __user *)(unsigned long)hdr->dout_xferp; } else if (hdr->din_xfer_len) { dxfer_len = hdr->din_xfer_len; dxferp = (void __user *)(unsigned long)hdr->din_xferp; } else dxfer_len = 0; if (dxfer_len) { ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, GFP_KERNEL); if (ret) goto out; } rq->sense = sense; rq->sense_len = 0; return rq; out: if (rq->cmd != rq->__cmd) kfree(rq->cmd); blk_put_request(rq); if (next_rq) { blk_rq_unmap_user(next_rq->bio); blk_put_request(next_rq); } return ERR_PTR(ret); } /* * 
async completion call-back from the block layer, when scsi/ide/whatever * calls end_that_request_last() on a request */ static void bsg_rq_end_io(struct request *rq, int uptodate) { struct bsg_command *bc = rq->end_io_data; struct bsg_device *bd = bc->bd; unsigned long flags; dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", bd->name, rq, bc, bc->bio, uptodate); bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); spin_lock_irqsave(&bd->lock, flags); list_move_tail(&bc->list, &bd->done_list); bd->done_cmds++; spin_unlock_irqrestore(&bd->lock, flags); wake_up(&bd->wq_done); } /* * do final setup of a 'bc' and submit the matching 'rq' to the block * layer for io */ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, struct bsg_command *bc, struct request *rq) { int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL)); /* * add bc command to busy queue and submit rq for io */ bc->rq = rq; bc->bio = rq->bio; if (rq->next_rq) bc->bidi_bio = rq->next_rq->bio; bc->hdr.duration = jiffies; spin_lock_irq(&bd->lock); list_add_tail(&bc->list, &bd->busy_list); spin_unlock_irq(&bd->lock); dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); rq->end_io_data = bc; blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); } static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) { struct bsg_command *bc = NULL; spin_lock_irq(&bd->lock); if (bd->done_cmds) { bc = list_first_entry(&bd->done_list, struct bsg_command, list); list_del(&bc->list); bd->done_cmds--; } spin_unlock_irq(&bd->lock); return bc; } /* * Get a finished command from the done list */ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) { struct bsg_command *bc; int ret; do { bc = bsg_next_done_cmd(bd); if (bc) break; if (!test_bit(BSG_F_BLOCK, &bd->flags)) { bc = ERR_PTR(-EAGAIN); break; } ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); if (ret) { bc = ERR_PTR(-ERESTARTSYS); break; } } while (1); dprintk("%s: returning done %p\n", 
bd->name, bc); return bc; } static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, struct bio *bio, struct bio *bidi_bio) { int ret = 0; dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); /* * fill in all the output members */ hdr->device_status = rq->errors & 0xff; hdr->transport_status = host_byte(rq->errors); hdr->driver_status = driver_byte(rq->errors); hdr->info = 0; if (hdr->device_status || hdr->transport_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; hdr->response_len = 0; if (rq->sense_len && hdr->response) { int len = min_t(unsigned int, hdr->max_response_len, rq->sense_len); ret = copy_to_user((void __user *)(unsigned long)hdr->response, rq->sense, len); if (!ret) hdr->response_len = len; else ret = -EFAULT; } if (rq->next_rq) { hdr->dout_resid = rq->resid_len; hdr->din_resid = rq->next_rq->resid_len; blk_rq_unmap_user(bidi_bio); blk_put_request(rq->next_rq); } else if (rq_data_dir(rq) == READ) hdr->din_resid = rq->resid_len; else hdr->dout_resid = rq->resid_len; /* * If the request generated a negative error number, return it * (providing we aren't already returning an error); if it's * just a protocol response (i.e. non negative), that gets * processed above. */ if (!ret && rq->errors < 0) ret = rq->errors; blk_rq_unmap_user(bio); if (rq->cmd != rq->__cmd) kfree(rq->cmd); blk_put_request(rq); return ret; } static int bsg_complete_all_commands(struct bsg_device *bd) { struct bsg_command *bc; int ret, tret; dprintk("%s: entered\n", bd->name); /* * wait for all commands to complete */ ret = 0; do { ret = bsg_io_schedule(bd); /* * look for -ENODATA specifically -- we'll sometimes get * -ERESTARTSYS when we've taken a signal, but we can't * return until we're done freeing the queue, so ignore * it. The signal will get handled when we're done freeing * the bsg_device. 
*/ } while (ret != -ENODATA); /* * discard done commands */ ret = 0; do { spin_lock_irq(&bd->lock); if (!bd->queued_cmds) { spin_unlock_irq(&bd->lock); break; } spin_unlock_irq(&bd->lock); bc = bsg_get_done_cmd(bd); if (IS_ERR(bc)) break; tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, bc->bidi_bio); if (!ret) ret = tret; bsg_free_command(bc); } while (1); return ret; } static int __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read) { struct bsg_command *bc; int nr_commands, ret; if (count % sizeof(struct sg_io_v4)) return -EINVAL; ret = 0; nr_commands = count / sizeof(struct sg_io_v4); while (nr_commands) { bc = bsg_get_done_cmd(bd); if (IS_ERR(bc)) { ret = PTR_ERR(bc); break; } /* * this is the only case where we need to copy data back * after completing the request. so do that here, * bsg_complete_work() cannot do that for us */ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, bc->bidi_bio); if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr))) ret = -EFAULT; bsg_free_command(bc); if (ret) break; buf += sizeof(struct sg_io_v4); *bytes_read += sizeof(struct sg_io_v4); nr_commands--; } return ret; } static inline void bsg_set_block(struct bsg_device *bd, struct file *file) { if (file->f_flags & O_NONBLOCK) clear_bit(BSG_F_BLOCK, &bd->flags); else set_bit(BSG_F_BLOCK, &bd->flags); } /* * Check if the error is a "real" error that we should return. 
*/ static inline int err_block_err(int ret) { if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN) return 1; return 0; } static ssize_t bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct bsg_device *bd = file->private_data; int ret; ssize_t bytes_read; dprintk("%s: read %Zd bytes\n", bd->name, count); bsg_set_block(bd, file); bytes_read = 0; ret = __bsg_read(buf, count, bd, NULL, &bytes_read); *ppos = bytes_read; if (!bytes_read || err_block_err(ret)) bytes_read = ret; return bytes_read; } static int __bsg_write(struct bsg_device *bd, const char __user *buf, size_t count, ssize_t *bytes_written, fmode_t has_write_perm) { struct bsg_command *bc; struct request *rq; int ret, nr_commands; if (count % sizeof(struct sg_io_v4)) return -EINVAL; nr_commands = count / sizeof(struct sg_io_v4); rq = NULL; bc = NULL; ret = 0; while (nr_commands) { struct request_queue *q = bd->queue; bc = bsg_alloc_command(bd); if (IS_ERR(bc)) { ret = PTR_ERR(bc); bc = NULL; break; } if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { ret = -EFAULT; break; } /* * get a request, fill in the blanks, and add to request queue */ rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); if (IS_ERR(rq)) { ret = PTR_ERR(rq); rq = NULL; break; } bsg_add_command(bd, q, bc, rq); bc = NULL; rq = NULL; nr_commands--; buf += sizeof(struct sg_io_v4); *bytes_written += sizeof(struct sg_io_v4); } if (bc) bsg_free_command(bc); return ret; } static ssize_t bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct bsg_device *bd = file->private_data; ssize_t bytes_written; int ret; dprintk("%s: write %Zd bytes\n", bd->name, count); bsg_set_block(bd, file); bytes_written = 0; ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode & FMODE_WRITE); *ppos = bytes_written; /* * return bytes written on non-fatal errors */ if (!bytes_written || err_block_err(ret)) bytes_written = ret; dprintk("%s: returning %Zd\n", bd->name, 
bytes_written); return bytes_written; } static struct bsg_device *bsg_alloc_device(void) { struct bsg_device *bd; bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); if (unlikely(!bd)) return NULL; spin_lock_init(&bd->lock); bd->max_queue = BSG_DEFAULT_CMDS; INIT_LIST_HEAD(&bd->busy_list); INIT_LIST_HEAD(&bd->done_list); INIT_HLIST_NODE(&bd->dev_list); init_waitqueue_head(&bd->wq_free); init_waitqueue_head(&bd->wq_done); return bd; } static void bsg_kref_release_function(struct kref *kref) { struct bsg_class_device *bcd = container_of(kref, struct bsg_class_device, ref); struct device *parent = bcd->parent; if (bcd->release) bcd->release(bcd->parent); put_device(parent); } static int bsg_put_device(struct bsg_device *bd) { int ret = 0, do_free; struct request_queue *q = bd->queue; mutex_lock(&bsg_mutex); do_free = atomic_dec_and_test(&bd->ref_count); if (!do_free) { mutex_unlock(&bsg_mutex); goto out; } hlist_del(&bd->dev_list); mutex_unlock(&bsg_mutex); dprintk("%s: tearing down\n", bd->name); /* * close can always block */ set_bit(BSG_F_BLOCK, &bd->flags); /* * correct error detection baddies here again. 
it's the responsibility * of the app to properly reap commands before close() if it wants * fool-proof error detection */ ret = bsg_complete_all_commands(bd); kfree(bd); out: kref_put(&q->bsg_dev.ref, bsg_kref_release_function); if (do_free) blk_put_queue(q); return ret; } static struct bsg_device *bsg_add_device(struct inode *inode, struct request_queue *rq, struct file *file) { struct bsg_device *bd; #ifdef BSG_DEBUG unsigned char buf[32]; #endif if (!blk_get_queue(rq)) return ERR_PTR(-ENXIO); bd = bsg_alloc_device(); if (!bd) { blk_put_queue(rq); return ERR_PTR(-ENOMEM); } bd->queue = rq; bsg_set_block(bd, file); atomic_set(&bd->ref_count, 1); mutex_lock(&bsg_mutex); hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); dprintk("bound to <%s>, max queue %d\n", format_dev_t(buf, inode->i_rdev), bd->max_queue); mutex_unlock(&bsg_mutex); return bd; } static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) { struct bsg_device *bd; struct hlist_node *entry; mutex_lock(&bsg_mutex); hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) { if (bd->queue == q) { atomic_inc(&bd->ref_count); goto found; } } bd = NULL; found: mutex_unlock(&bsg_mutex); return bd; } static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) { struct bsg_device *bd; struct bsg_class_device *bcd; /* * find the class device */ mutex_lock(&bsg_mutex); bcd = idr_find(&bsg_minor_idr, iminor(inode)); if (bcd) kref_get(&bcd->ref); mutex_unlock(&bsg_mutex); if (!bcd) return ERR_PTR(-ENODEV); bd = __bsg_get_device(iminor(inode), bcd->queue); if (bd) return bd; bd = bsg_add_device(inode, bcd->queue, file); if (IS_ERR(bd)) kref_put(&bcd->ref, bsg_kref_release_function); return bd; } static int bsg_open(struct inode *inode, struct file *file) { struct bsg_device *bd; bd = bsg_get_device(inode, file); if (IS_ERR(bd)) return PTR_ERR(bd); file->private_data = bd; return 
0; } static int bsg_release(struct inode *inode, struct file *file) { struct bsg_device *bd = file->private_data; file->private_data = NULL; return bsg_put_device(bd); } static unsigned int bsg_poll(struct file *file, poll_table *wait) { struct bsg_device *bd = file->private_data; unsigned int mask = 0; poll_wait(file, &bd->wq_done, wait); poll_wait(file, &bd->wq_free, wait); spin_lock_irq(&bd->lock); if (!list_empty(&bd->done_list)) mask |= POLLIN | POLLRDNORM; if (bd->queued_cmds < bd->max_queue) mask |= POLLOUT; spin_unlock_irq(&bd->lock); return mask; } static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct bsg_device *bd = file->private_data; int __user *uarg = (int __user *) arg; int ret; switch (cmd) { /* * our own ioctls */ case SG_GET_COMMAND_Q: return put_user(bd->max_queue, uarg); case SG_SET_COMMAND_Q: { int queue; if (get_user(queue, uarg)) return -EFAULT; if (queue < 1) return -EINVAL; spin_lock_irq(&bd->lock); bd->max_queue = queue; spin_unlock_irq(&bd->lock); return 0; } /* * SCSI/sg ioctls */ case SG_GET_VERSION_NUM: case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SG_SET_TIMEOUT: case SG_GET_TIMEOUT: case SG_GET_RESERVED_SIZE: case SG_SET_RESERVED_SIZE: case SG_EMULATED_HOST: case SCSI_IOCTL_SEND_COMMAND: { void __user *uarg = (void __user *) arg; return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); } case SG_IO: { struct request *rq; struct bio *bio, *bidi_bio = NULL; struct sg_io_v4 hdr; int at_head; u8 sense[SCSI_SENSE_BUFFERSIZE]; if (copy_from_user(&hdr, uarg, sizeof(hdr))) return -EFAULT; rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); if (IS_ERR(rq)) return PTR_ERR(rq); bio = rq->bio; if (rq->next_rq) bidi_bio = rq->next_rq->bio; at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL)); blk_execute_rq(bd->queue, NULL, rq, at_head); ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); if (copy_to_user(uarg, &hdr, sizeof(hdr))) return -EFAULT; return ret; } /* * block 
device ioctls */ default: #if 0 return ioctl_by_bdev(bd->bdev, cmd, arg); #else return -ENOTTY; #endif } } static const struct file_operations bsg_fops = { .read = bsg_read, .write = bsg_write, .poll = bsg_poll, .open = bsg_open, .release = bsg_release, .unlocked_ioctl = bsg_ioctl, .owner = THIS_MODULE, .llseek = default_llseek, }; void bsg_unregister_queue(struct request_queue *q) { struct bsg_class_device *bcd = &q->bsg_dev; if (!bcd->class_dev) return; mutex_lock(&bsg_mutex); idr_remove(&bsg_minor_idr, bcd->minor); if (q->kobj.sd) sysfs_remove_link(&q->kobj, "bsg"); device_unregister(bcd->class_dev); bcd->class_dev = NULL; kref_put(&bcd->ref, bsg_kref_release_function); mutex_unlock(&bsg_mutex); } EXPORT_SYMBOL_GPL(bsg_unregister_queue); int bsg_register_queue(struct request_queue *q, struct device *parent, const char *name, void (*release)(struct device *)) { struct bsg_class_device *bcd; dev_t dev; int ret, minor; struct device *class_dev = NULL; const char *devname; if (name) devname = name; else devname = dev_name(parent); /* * we need a proper transport to send commands, not a stacked device */ if (!q->request_fn) return 0; bcd = &q->bsg_dev; memset(bcd, 0, sizeof(*bcd)); mutex_lock(&bsg_mutex); ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL); if (!ret) { ret = -ENOMEM; goto unlock; } ret = idr_get_new(&bsg_minor_idr, bcd, &minor); if (ret < 0) goto unlock; if (minor >= BSG_MAX_DEVS) { printk(KERN_ERR "bsg: too many bsg devices\n"); ret = -EINVAL; goto remove_idr; } bcd->minor = minor; bcd->queue = q; bcd->parent = get_device(parent); bcd->release = release; kref_init(&bcd->ref); dev = MKDEV(bsg_major, bcd->minor); class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname); if (IS_ERR(class_dev)) { ret = PTR_ERR(class_dev); goto put_dev; } bcd->class_dev = class_dev; if (q->kobj.sd) { ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"); if (ret) goto unregister_class_dev; } mutex_unlock(&bsg_mutex); return 0; unregister_class_dev: 
device_unregister(class_dev); put_dev: put_device(parent); remove_idr: idr_remove(&bsg_minor_idr, minor); unlock: mutex_unlock(&bsg_mutex); return ret; } EXPORT_SYMBOL_GPL(bsg_register_queue); static struct cdev bsg_cdev; static char *bsg_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); } static int __init bsg_init(void) { int ret, i; dev_t devid; bsg_cmd_cachep = kmem_cache_create("bsg_cmd", sizeof(struct bsg_command), 0, 0, NULL); if (!bsg_cmd_cachep) { printk(KERN_ERR "bsg: failed creating slab cache\n"); return -ENOMEM; } for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++) INIT_HLIST_HEAD(&bsg_device_list[i]); bsg_class = class_create(THIS_MODULE, "bsg"); if (IS_ERR(bsg_class)) { ret = PTR_ERR(bsg_class); goto destroy_kmemcache; } bsg_class->devnode = bsg_devnode; ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); if (ret) goto destroy_bsg_class; bsg_major = MAJOR(devid); cdev_init(&bsg_cdev, &bsg_fops); ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS); if (ret) goto unregister_chrdev; printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION " loaded (major %d)\n", bsg_major); return 0; unregister_chrdev: unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS); destroy_bsg_class: class_destroy(bsg_class); destroy_kmemcache: kmem_cache_destroy(bsg_cmd_cachep); return ret; } MODULE_AUTHOR("Jens Axboe"); MODULE_DESCRIPTION(BSG_DESCRIPTION); MODULE_LICENSE("GPL"); device_initcall(bsg_init);
gpl-2.0
MoKee/android_kernel_samsung_tuna
fs/hpfs/buffer.c
7983
3825
/* * linux/fs/hpfs/buffer.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * general buffer i/o */ #include <linux/sched.h> #include <linux/slab.h> #include "hpfs_fn.h" /* Map a sector into a buffer and return pointers to it and to the buffer. */ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, int ahead) { struct buffer_head *bh; hpfs_lock_assert(s); cond_resched(); *bhp = bh = sb_bread(s, secno); if (bh != NULL) return bh->b_data; else { printk("HPFS: hpfs_map_sector: read error\n"); return NULL; } } /* Like hpfs_map_sector but don't read anything */ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp) { struct buffer_head *bh; /*return hpfs_map_sector(s, secno, bhp, 0);*/ hpfs_lock_assert(s); cond_resched(); if ((*bhp = bh = sb_getblk(s, secno)) != NULL) { if (!buffer_uptodate(bh)) wait_on_buffer(bh); set_buffer_uptodate(bh); return bh->b_data; } else { printk("HPFS: hpfs_get_sector: getblk failed\n"); return NULL; } } /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. 
*/ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh, int ahead) { struct buffer_head *bh; char *data; hpfs_lock_assert(s); cond_resched(); if (secno & 3) { printk("HPFS: hpfs_map_4sectors: unaligned read\n"); return NULL; } qbh->data = data = kmalloc(2048, GFP_NOFS); if (!data) { printk("HPFS: hpfs_map_4sectors: out of memory\n"); goto bail; } qbh->bh[0] = bh = sb_bread(s, secno); if (!bh) goto bail0; memcpy(data, bh->b_data, 512); qbh->bh[1] = bh = sb_bread(s, secno + 1); if (!bh) goto bail1; memcpy(data + 512, bh->b_data, 512); qbh->bh[2] = bh = sb_bread(s, secno + 2); if (!bh) goto bail2; memcpy(data + 2 * 512, bh->b_data, 512); qbh->bh[3] = bh = sb_bread(s, secno + 3); if (!bh) goto bail3; memcpy(data + 3 * 512, bh->b_data, 512); return data; bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: kfree(data); printk("HPFS: hpfs_map_4sectors: read error\n"); bail: return NULL; } /* Don't read sectors */ void *hpfs_get_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh) { cond_resched(); hpfs_lock_assert(s); if (secno & 3) { printk("HPFS: hpfs_get_4sectors: unaligned read\n"); return NULL; } /*return hpfs_map_4sectors(s, secno, qbh, 0);*/ if (!(qbh->data = kmalloc(2048, GFP_NOFS))) { printk("HPFS: hpfs_get_4sectors: out of memory\n"); return NULL; } if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0; if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1; if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2; if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3; memcpy(qbh->data, qbh->bh[0]->b_data, 512); memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512); memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512); memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512); return qbh->data; bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: return NULL; } void hpfs_brelse4(struct quad_buffer_head *qbh) { 
brelse(qbh->bh[3]); brelse(qbh->bh[2]); brelse(qbh->bh[1]); brelse(qbh->bh[0]); kfree(qbh->data); } void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh) { PRINTK(("hpfs_mark_4buffers_dirty\n")); memcpy(qbh->bh[0]->b_data, qbh->data, 512); memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512); memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512); memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512); mark_buffer_dirty(qbh->bh[0]); mark_buffer_dirty(qbh->bh[1]); mark_buffer_dirty(qbh->bh[2]); mark_buffer_dirty(qbh->bh[3]); }
gpl-2.0
wyldstallyns/Roughneck_kernel_m8-GPE-5.1
arch/powerpc/sysdev/mpic_pasemi_msi.c
9775
4554
/*
 * Copyright 2007, Olof Johansson, PA Semi
 *
 * Based on arch/powerpc/sysdev/mpic_u3msi.c:
 *
 * Copyright 2006, Segher Boessenkool, IBM Corporation.
 * Copyright 2006-2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */

#undef DEBUG

#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>

#include "mpic.h"

/* Allocate 16 interrupts per device, to give an alignment of 16,
 * since that's the size of the grouping w.r.t. affinity. If someone
 * needs more than 32 MSI's down the road we'll have to rethink this,
 * but it should be OK for now.
 */
#define ALLOC_CHUNK 16

/* Magic doorbell address: the device generates an MSI by writing its
 * (offset) data value to this address.
 */
#define PASEMI_MSI_ADDR 0xfc080000

/* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic;

/* Mask an MSI at both levels: first at the MSI capability, then at
 * the MPIC source itself.
 */
static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
{
	pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
	mask_msi_irq(data);
	mpic_mask_irq(data);
}

/* Unmask in the opposite order of masking: MPIC source first, then
 * the MSI capability.
 */
static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
{
	pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
	mpic_unmask_irq(data);
	unmask_msi_irq(data);
}

static struct irq_chip mpic_pasemi_msi_chip = {
	.irq_shutdown		= mpic_pasemi_msi_mask_irq,
	.irq_mask		= mpic_pasemi_msi_mask_irq,
	.irq_unmask		= mpic_pasemi_msi_unmask_irq,
	.irq_eoi		= mpic_end_irq,
	.irq_set_type		= mpic_set_irq_type,
	.irq_set_affinity	= mpic_set_affinity,
	.name			= "PASEMI-MSI",
};

/* Capability check hook: we accept everything; MSI-X has simply not
 * been exercised on this hardware yet.
 */
static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	if (type == PCI_CAP_ID_MSIX)
		pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");

	return 0;
}

/* Undo pasemi_msi_setup_msi_irqs() for every MSI descriptor of the
 * device: detach the descriptor, return the ALLOC_CHUNK-sized hwirq
 * range to the bitmap, and dispose of the virq mapping.
 */
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;

	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
				       virq_to_hw(entry->irq), ALLOC_CHUNK);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

/* Allocate hwirqs, map them to virqs, and program the MSI message for
 * every descriptor on the device.  Returns 0 on success or a negative
 * errno; on a mapping failure the just-allocated chunk is given back.
 */
static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;

	pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
		 pdev, nvec, type);
	msg.address_hi = 0;
	msg.address_lo = PASEMI_MSI_ADDR;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/* Allocate 16 interrupts for now, since that's the grouping for
		 * affinity. This can be changed later if it turns out 32 is too
		 * few MSIs for someone, but restrictions will apply to how the
		 * sources can be changed independently.
		 */
		hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap,
						ALLOC_CHUNK);
		if (hwirq < 0) {
			pr_debug("pasemi_msi: failed allocating hwirq\n");
			return hwirq;
		}

		virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
		if (virq == NO_IRQ) {
			pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n",
				 hwirq);
			msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq,
					       ALLOC_CHUNK);
			return -ENOSPC;
		}

		/* Vector on MSI is really an offset, the hardware adds
		 * it to the value written at the magic address. So set
		 * it to 0 to remain sane.
		 */
		mpic_set_vector(virq, 0);

		irq_set_msi_desc(virq, entry);
		irq_set_chip(virq, &mpic_pasemi_msi_chip);
		irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

		pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
			 "addr 0x%x\n", virq, hwirq, msg.address_lo);

		/* Likewise, the device writes [0...511] into the target
		 * register to generate MSI [512...1023]
		 */
		msg.data = hwirq-0x200;
		write_msi_msg(virq, &msg);
	}

	return 0;
}

/* Probe hook: if this MPIC is a PA Semi PWRficient OpenPIC, set up the
 * MSI allocator and install the ppc_md MSI callbacks.  Returns 0 on
 * success, -ENODEV for other interrupt controllers, or the allocator's
 * error code.
 */
int mpic_pasemi_msi_init(struct mpic *mpic)
{
	int rc;

	if (!mpic->irqhost->of_node ||
	    !of_device_is_compatible(mpic->irqhost->of_node,
				     "pasemi,pwrficient-openpic"))
		return -ENODEV;

	rc = mpic_msi_init_allocator(mpic);
	if (rc) {
		pr_debug("pasemi_msi: Error allocating bitmap!\n");
		return rc;
	}

	pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n");

	msi_mpic = mpic;
	WARN_ON(ppc_md.setup_msi_irqs);
	ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
	ppc_md.msi_check_device = pasemi_msi_check_device;

	return 0;
}
gpl-2.0
xb446909/personalprojects
ARMToolChain/source/gcc-5.2.0/libgcc/config/spu/cachemgr.c
48
12244
/* Copyright (C) 2008-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Software cache manager for SPU __ea accesses: a 4-way set-associative
   cache of 128-byte lines held in local store, filled and written back
   with MFC DMA.  */

#include <spu_mfcio.h>
#include <spu_internals.h>
#include <spu_intrinsics.h>
#include <spu_cache.h>

extern unsigned long long __ea_local_store;
extern char __cache_tag_array_size;

#define LINE_SIZE 128
#define TAG_MASK (LINE_SIZE - 1)

#define WAYS 4
#define SET_MASK ((int) &__cache_tag_array_size - LINE_SIZE)

#define CACHE_LINES ((int) &__cache_tag_array_size /		\
		     sizeof (struct __cache_tag_array) * WAYS)

struct __cache_tag_array
{
  unsigned int tag_lo[WAYS];
  unsigned int tag_hi[WAYS];
  void *base[WAYS];
  int reserved[WAYS];		/* LRU counter, MSB doubles as the LS flag.  */
  vector unsigned short dirty_bits[WAYS];
};

extern struct __cache_tag_array __cache_tag_array[];
extern char __cache[];

/* In order to make the code seem a little cleaner, and to avoid having
   64/32 bit ifdefs all over the place, we use macros.  */

#ifdef __EA64__
typedef unsigned long long addr;

#define CHECK_TAG(_entry, _way, _tag)			\
  ((_entry)->tag_lo[(_way)] == ((_tag) & 0xFFFFFFFF)	\
   && (_entry)->tag_hi[(_way)] == ((_tag) >> 32))

#define GET_TAG(_entry, _way)				\
  ((unsigned long long)(_entry)->tag_hi[(_way)] << 32	\
   | (unsigned long long)(_entry)->tag_lo[(_way)])

#define SET_TAG(_entry, _way, _tag)			\
  (_entry)->tag_lo[(_way)] = (_tag) & 0xFFFFFFFF;	\
  (_entry)->tag_hi[(_way)] = (_tag) >> 32

#else /*__EA32__*/
typedef unsigned long addr;

#define CHECK_TAG(_entry, _way, _tag)			\
  ((_entry)->tag_lo[(_way)] == (_tag))

#define GET_TAG(_entry, _way)				\
  ((_entry)->tag_lo[(_way)])

#define SET_TAG(_entry, _way, _tag)			\
  (_entry)->tag_lo[(_way)] = (_tag)

#endif

/* In GET_ENTRY, we cast away the high 32 bits,
   as the tag is only in the low 32.  */

#define GET_ENTRY(_addr)						   \
  ((struct __cache_tag_array *)						   \
   si_to_uint (si_a (si_and (si_from_uint ((unsigned int) (addr) (_addr)), \
			     si_from_uint (SET_MASK)),			   \
		     si_from_uint ((unsigned int) __cache_tag_array))))

#define GET_CACHE_LINE(_addr, _way) \
  ((void *) (__cache + ((_addr) & SET_MASK) * WAYS) + ((_way) * LINE_SIZE));

#define CHECK_DIRTY(_vec) (si_to_uint (si_orx ((qword) (_vec))))
/* A lo-tag of 1 is the "empty" sentinel; real tags are line-aligned so
   they can never equal 1.  */
#define SET_EMPTY(_entry, _way) ((_entry)->tag_lo[(_way)] = 1)
#define CHECK_EMPTY(_entry, _way) ((_entry)->tag_lo[(_way)] == 1)

#define LS_FLAG 0x80000000
#define SET_IS_LS(_entry, _way) ((_entry)->reserved[(_way)] |= LS_FLAG)
#define CHECK_IS_LS(_entry, _way) ((_entry)->reserved[(_way)] & LS_FLAG)
#define GET_LRU(_entry, _way) ((_entry)->reserved[(_way)] & ~LS_FLAG)

/* 32 is an invalid MFC tag value; it marks "no tag reserved yet".  */
static int dma_tag = 32;

/* Write back WAY of ENTRY if it is dirty (and not a local-store alias),
   then mark the way empty and clear its dirty bits.  */
static void
__cache_evict_entry (struct __cache_tag_array *entry, int way)
{
  addr tag = GET_TAG (entry, way);

  if (CHECK_DIRTY (entry->dirty_bits[way]) && !CHECK_IS_LS (entry, way))
    {
#ifdef NONATOMIC
      /* Non-atomic writes.  */
      unsigned int oldmask, mach_stat;
      char *line = ((void *) 0);

      /* Enter critical section.  */
      mach_stat = spu_readch (SPU_RdMachStat);
      spu_idisable ();

      /* Issue DMA request.  */
      line = GET_CACHE_LINE (entry->tag_lo[way], way);
      mfc_put (line, tag, LINE_SIZE, dma_tag, 0, 0);

      /* Wait for DMA completion.  */
      oldmask = mfc_read_tag_mask ();
      mfc_write_tag_mask (1 << dma_tag);
      mfc_read_tag_status_all ();
      mfc_write_tag_mask (oldmask);

      /* Leave critical section.  */
      if (__builtin_expect (mach_stat & 1, 0))
	spu_ienable ();
#else
      /* Allocate a buffer large enough that we know it has 128 bytes
         that are 128 byte aligned (for DMA).  */

      char buffer[LINE_SIZE + 127];
      qword *buf_ptr = (qword *) (((unsigned int) (buffer) + 127) & ~127);
      qword *line = GET_CACHE_LINE (entry->tag_lo[way], way);
      qword bits;
      unsigned int mach_stat;

      /* Enter critical section.  */
      mach_stat = spu_readch (SPU_RdMachStat);
      spu_idisable ();

      do
	{
	  /* We atomically read the current memory into a buffer
	     modify the dirty bytes in the buffer, and write it
	     back. If writeback fails, loop and try again.  */

	  mfc_getllar (buf_ptr, tag, 0, 0);
	  mfc_read_atomic_status ();

	  /* The method we're using to write 16 dirty bytes into
	     the buffer at a time uses fsmb which in turn uses
	     the least significant 16 bits of word 0, so we
	     load the bits and rotate so that the first bit of
	     the bitmap is in the first bit that fsmb will use.  */

	  bits = (qword) entry->dirty_bits[way];
	  bits = si_rotqbyi (bits, -2);

	  /* Si_fsmb creates the mask of dirty bytes.
	     Use selb to nab the appropriate bits.  */
	  buf_ptr[0] = si_selb (buf_ptr[0], line[0], si_fsmb (bits));

	  /* Rotate to next 16 byte section of cache.  */
	  bits = si_rotqbyi (bits, 2);

	  buf_ptr[1] = si_selb (buf_ptr[1], line[1], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);
	  buf_ptr[2] = si_selb (buf_ptr[2], line[2], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);
	  buf_ptr[3] = si_selb (buf_ptr[3], line[3], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);
	  buf_ptr[4] = si_selb (buf_ptr[4], line[4], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);
	  buf_ptr[5] = si_selb (buf_ptr[5], line[5], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);
	  buf_ptr[6] = si_selb (buf_ptr[6], line[6], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);
	  buf_ptr[7] = si_selb (buf_ptr[7], line[7], si_fsmb (bits));
	  bits = si_rotqbyi (bits, 2);

	  mfc_putllc (buf_ptr, tag, 0, 0);
	}
      while (mfc_read_atomic_status ());

      /* Leave critical section.  */
      if (__builtin_expect (mach_stat & 1, 0))
	spu_ienable ();
#endif
    }

  /* In any case, marking the lo tag with 1 which denotes empty.  */
  SET_EMPTY (entry, way);
  entry->dirty_bits[way] = (vector unsigned short) si_from_uint (0);
}

/* Public API: evict the line containing effective address EA, writing
   it back first if dirty.  */
void
__cache_evict (__ea void *ea)
{
  addr tag = (addr) ea & ~TAG_MASK;
  struct __cache_tag_array *entry = GET_ENTRY (ea);
  int i = 0;

  /* Cycles through all the possible ways an address could be at
     and evicts the way if found.  */

  for (i = 0; i < WAYS; i++)
    if (CHECK_TAG (entry, i, tag))
      __cache_evict_entry (entry, i);
}

/* DMA the 128-byte line at TAG from main memory into WAY's local-store
   slot; returns the local-store address of the filled line.  */
static void *
__cache_fill (int way, addr tag)
{
  unsigned int oldmask, mach_stat;
  char *line = ((void *) 0);

  /* Reserve our DMA tag.  */
  if (dma_tag == 32)
    dma_tag = mfc_tag_reserve ();

  /* Enter critical section.  */
  mach_stat = spu_readch (SPU_RdMachStat);
  spu_idisable ();

  /* Issue DMA request.  */
  line = GET_CACHE_LINE (tag, way);
  mfc_get (line, tag, LINE_SIZE, dma_tag, 0, 0);

  /* Wait for DMA completion.  */
  oldmask = mfc_read_tag_mask ();
  mfc_write_tag_mask (1 << dma_tag);
  mfc_read_tag_status_all ();
  mfc_write_tag_mask (oldmask);

  /* Leave critical section.  */
  if (__builtin_expect (mach_stat & 1, 0))
    spu_ienable ();

  return (void *) line;
}

/* Handle a cache miss for EA in ENTRY: WAY is either the first empty
   way, or >= WAYS when the set is full (then the LRU way is evicted).
   Addresses aliasing the SPU's own local store are mapped directly
   instead of being DMA-filled.  */
static void
__cache_miss (__ea void *ea, struct __cache_tag_array *entry, int way)
{

  addr tag = (addr) ea & ~TAG_MASK;
  unsigned int lru = 0;
  int i = 0;
  int idx = 0;

  /* If way > 4, then there are no empty slots, so we must evict
     the least recently used entry.  */
  if (way >= 4)
    {
      for (i = 0; i < WAYS; i++)
	{
	  if (GET_LRU (entry, i) > lru)
	    {
	      lru = GET_LRU (entry, i);
	      idx = i;
	    }
	}
      __cache_evict_entry (entry, idx);
      way = idx;
    }

  /* Set the empty entry's tag and fill it's cache line.  */

  SET_TAG (entry, way, tag);
  entry->reserved[way] = 0;

  /* Check if the address is just an effective address within the
     SPU's local store.  */

  /* Because the LS is not 256k aligned, we can't do a nice and mask
     here to compare, so we must check the whole range.  */

  if ((addr) ea >= (addr) __ea_local_store
      && (addr) ea < (addr) (__ea_local_store + 0x40000))
    {
      SET_IS_LS (entry, way);
      entry->base[way] =
	(void *) ((unsigned int) ((addr) ea -
				  (addr) __ea_local_store) & ~0x7f);
    }
  else
    {
      entry->base[way] = __cache_fill (way, tag);
    }
}

/* Translate EA into a local-store pointer, faulting the line in on a
   miss.  If N_BYTES_DIRTY is non-zero, mark that many bytes starting
   at EA dirty so they get written back on eviction.  */
void *
__cache_fetch_dirty (__ea void *ea, int n_bytes_dirty)
{
#ifdef __EA64__
  unsigned int tag_hi;
  qword etag_hi;
#endif
  unsigned int tag_lo;
  struct __cache_tag_array *entry;

  qword etag_lo;
  qword equal;
  qword bit_mask;
  qword way;

  /* This first chunk, we merely fill the pointer and tag.  */

  entry = GET_ENTRY (ea);

#ifndef __EA64__
  tag_lo =
    si_to_uint (si_andc
		(si_shufb
		 (si_from_uint ((addr) ea), si_from_uint (0),
		  si_from_uint (0x00010203)), si_from_uint (TAG_MASK)));
#else
  tag_lo =
    si_to_uint (si_andc
		(si_shufb
		 (si_from_ullong ((addr) ea), si_from_uint (0),
		  si_from_uint (0x04050607)), si_from_uint (TAG_MASK)));

  tag_hi =
    si_to_uint (si_shufb
		(si_from_ullong ((addr) ea), si_from_uint (0),
		 si_from_uint (0x00010203)));
#endif

  /* Increment LRU in reserved bytes.  */
  si_stqd (si_ai (si_lqd (si_from_ptr (entry), 48), 1),
	   si_from_ptr (entry), 48);

missreturn:
  /* Check if the entry's lo_tag is equal to the address' lo_tag.  */
  etag_lo = si_lqd (si_from_ptr (entry), 0);
  equal = si_ceq (etag_lo, si_from_uint (tag_lo));
#ifdef __EA64__
  /* And the high tag too.  */
  etag_hi = si_lqd (si_from_ptr (entry), 16);
  equal = si_and (equal, (si_ceq (etag_hi, si_from_uint (tag_hi))));
#endif

  if ((si_to_uint (si_orx (equal)) == 0))
    goto misshandler;

  if (n_bytes_dirty)
    {
      /* way = 0x40,0x50,0x60,0x70 for each way, which is also the
         offset of the appropriate dirty bits.  */
      way = si_shli (si_clz (si_gbb (equal)), 2);

      /* To create the bit_mask, we set it to all 1s (uint -1), then we
         shift it over (128 - n_bytes_dirty) times.  */

      bit_mask = si_from_uint (-1);

      bit_mask = si_shlqby (bit_mask, si_from_uint ((LINE_SIZE -
						     n_bytes_dirty) / 8));

      bit_mask = si_shlqbi (bit_mask, si_from_uint ((LINE_SIZE -
						     n_bytes_dirty) % 8));

      /* Rotate it around to the correct offset.  */
      bit_mask = si_rotqby (bit_mask,
			    si_from_uint (-1 * ((addr) ea & TAG_MASK) / 8));

      bit_mask = si_rotqbi (bit_mask,
			    si_from_uint (-1 * ((addr) ea & TAG_MASK) % 8));

      /* Update the dirty bits.  */
      si_stqx (si_or (si_lqx (si_from_ptr (entry), way), bit_mask),
	       si_from_ptr (entry), way);
    };

  /* We've definitely found the right entry, set LRU (reserved)
     to 0 maintaining the LS flag (MSB).  */

  si_stqd (si_andc
	   (si_lqd (si_from_ptr (entry), 48),
	    si_and (equal, si_from_uint (~(LS_FLAG)))),
	   si_from_ptr (entry), 48);

  return (void *)
    si_to_uint (si_a
		(si_orx
		 (si_and (si_lqd (si_from_ptr (entry), 32), equal)),
		 si_from_uint (((unsigned int) (addr) ea) & TAG_MASK)));

misshandler:
  /* Find the first empty way (lo-tag == 1) and let __cache_miss fill
     it (or evict LRU if none), then retry the tag compare.  */
  equal = si_ceqi (etag_lo, 1);
  __cache_miss (ea, entry, (si_to_uint (si_clz (si_gbb (equal))) - 16) >> 2);
  goto missreturn;
}

/* Read-only fetch: same as __cache_fetch_dirty with no dirty bytes.  */
void *
__cache_fetch (__ea void *ea)
{
  return __cache_fetch_dirty (ea, 0);
}

/* Prefetch hint; intentionally does nothing at present.  */
void
__cache_touch (__ea void *ea __attribute__ ((unused)))
{
  /* NO-OP for now.  */
}

void __cache_flush (void) __attribute__ ((destructor));

/* Destructor: write back and empty every used way in every set so no
   dirty data is lost at program exit.  */
void
__cache_flush (void)
{
  struct __cache_tag_array *entry = __cache_tag_array;
  unsigned int i;
  int j;

  /* Cycle through each cache entry and evict all used ways.  */

  for (i = 0; i < CACHE_LINES / WAYS; i++)
    {
      for (j = 0; j < WAYS; j++)
	if (!CHECK_EMPTY (entry, j))
	  __cache_evict_entry (entry, j);

      entry++;
    }
}
gpl-2.0
omasanori/ctags
argproc.c
48
11683
/* * $Id$ * * Copyright (c) 1989, Mark Pizzolato (mark@infopiz.uucp) * * This source code is released for free distribution under the terms of the * GNU General Public License. * * Provided by Stephen P. Wall <swall@redcom.com> * Extracted from the VMS port of GNU patch-2.1. * * This module provides redirection support for the VAX DECC port of * Exuberant Ctags. */ /* * @(#)argproc.c 1.0 89/02/01 Mark Pizzolato (mark@infopiz.uucp) */ #ifndef lint char argproc_version [] = "@(#)argproc.c VMS uucp Version infopiz-1.0"; #endif #include <ctype.h> #include <descrip.h> #include <dvidef.h> #include <errno.h> #include <iodef.h> #include <lib$routines.h> #include <starlet.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <syidef.h> /* System Information Definitions */ #define EXIT_OK 1 /* image exit code */ #define EXIT_ERR 0x10000000 /* image exit code */ /* * getredirection() is intended to aid in porting C programs * to VMS (Vax-11 C) which does not support '>' and '<' * I/O redirection, along with a command line pipe mechanism * using the '|' AND background command execution '&'. * The piping mechanism will probably work with almost any 'filter' type * of program. With suitable modification, it may useful for other * portability problems as well. * * Author: Mark Pizzolato mark@infopiz.UUCP * Mods: Steve Wall Don't return a full path unless the * original filename included a path. */ struct list_item { struct list_item *next; char *value; }; static expand_wild_cards (); static char *pipe_and_fork (); int getredirection (ac, av) int *ac; char ***av; /* * Process vms redirection arg's. Exit if any error is seen. * If getredirection() processes an argument, it is erased * from the vector. getredirection () returns a new argc and argv value. * In the event that a background command is requested (by a trailing "&"), * this routine creates a background subprocess, and simply exits the program. * * Warning: do not try to simplify the code for vms. 
The code * presupposes that getredirection() is called before any data is * read from stdin or written to stdout. * * Normal usage is as follows: * * main (argc, argv) * int argc; * char *argv []; * { * getredirection (&argc, &argv); * } */ { int argc = *ac; /* Argument Count */ char **argv = *av; /* Argument Vector */ char *ap; /* Argument pointer */ int j; /* argv [] index */ extern int errno; /* Last vms i/o error */ int item_count = 0; /* Count of Items in List */ struct list_item *list_head = 0; /* First Item in List */ struct list_item *list_tail; /* Last Item in List */ char *in = NULL; /* Input File Name */ char *out = NULL; /* Output File Name */ char *outmode = "w"; /* Mode to Open Output File */ int cmargc = 0; /* Piped Command Arg Count */ char **cmargv = NULL;/* Piped Command Arg Vector */ /* * First handle the case where the last thing on the line ends with * a '&'. This indicates the desire for the command to be run in a * subprocess, so we satisfy that desire. */ { extern background_process (); ap = argv [argc-1]; if (0 == strcmp ("&", ap)) exit (background_process (--argc, argv)); if ('&' == ap [strlen (ap)-1]) { ap [strlen (ap)-1] = '\0'; exit (background_process (argc, argv)); } } /* * Now we handle the general redirection cases that involve '>', '>>', * '<', and pipes '|'. 
*/ for (j = 0; j < argc; ++j) { if (0 == strcmp ("<", argv [j])) { if (j+1 >= argc) { errno = EINVAL; perror ("No input file"); exit (EXIT_ERR); } in = argv [++j]; continue; } if ('<' == *(ap = argv [j])) { in = 1 + ap; continue; } if (0 == strcmp (">", ap)) { if (j+1 >= argc) { errno = EINVAL; perror ("No output file"); exit (EXIT_ERR); } out = argv [++j]; continue; } if ('>' == *ap) { if ('>' == ap [1]) { outmode = "a"; if ('\0' == ap [2]) out = argv [++j]; else out = 2 + ap; } else out = 1 + ap; continue; } if (0 == strcmp ("|", argv [j])) { if (j+1 >= argc) { errno = EPIPE; perror ("No command to Pipe to"); exit (EXIT_ERR); } cmargc = argc- (j+1); cmargv = &argv [j+1]; argc = j; continue; } if ('|' == *(ap = argv [j])) { ++argv [j]; cmargc = argc-j; cmargv = &argv [j]; argc = j; continue; } expand_wild_cards (ap, &list_head, &list_tail, &item_count); } /* * Allocate and fill in the new argument vector, Some Unix's terminate * the list with an extra null pointer. */ argv = *av = calloc (item_count+1, sizeof (char *)); for (j = 0; j < item_count; ++j, list_head = list_head->next) argv [j] = list_head->value; *ac = item_count; if (cmargv != NULL) { char subcmd [1024]; if (out != NULL) { errno = EINVAL; perror ("Invalid '|' and '>' specified"); exit (EXIT_ERR); } strcpy (subcmd, cmargv [0]); for (j = 1; j < cmargc; ++j) { strcat (subcmd, " \""); strcat (subcmd, cmargv [j]); strcat (subcmd, "\""); } out = pipe_and_fork (subcmd); } if ((in != NULL) && (NULL == freopen (in, "r", stdin, "mbc=32", "mbf=2"))) { perror (in); /* Can't find file */ exit (EXIT_ERR); /* Is a fatal error */ } if ((out != NULL) && (NULL == freopen (out, outmode, stdout, "mbc=32", "mbf=2"))) { perror (ap); /* Error, can't write or append */ exit (EXIT_ERR); /* Is a fatal error */ } #ifdef DEBUG fprintf (stderr, "Arglist:\n"); for (j = 0; j < *ac; ++j) fprintf (stderr, "argv[%d] = '%s'\n", j, argv [j]); #endif return 0; } static add_item (head, tail, value, count) struct list_item **head; struct 
list_item **tail; char *value; int *count; { if (*head == 0) { if (NULL == (*head = calloc (1, sizeof (**head)))) { errno = ENOMEM; perror (""); exit (EXIT_ERR); } *tail = *head; } else if (NULL == ((*tail)->next = calloc (1, sizeof (**head)))) { errno = ENOMEM; perror (""); exit (EXIT_ERR); } else *tail = (*tail)->next; (*tail)->value = value; ++ (*count); } static expand_wild_cards (item, head, tail, count) char *item; struct list_item **head; struct list_item **tail; int *count; { int expcount = 0; int context = 0; int status; int status_value; char *had_version; int had_path; $DESCRIPTOR (filespec, item); /*$DESCRIPTOR (defaultspec, "SYS$DISK:[]*.*;");*/ $DESCRIPTOR (defaultspec, ""); $DESCRIPTOR (resultspec, ""); if (strcspn (item, "*%") == strlen (item)) { add_item (head, tail, item, count); return; } resultspec.dsc$b_dtype = DSC$K_DTYPE_T; resultspec.dsc$b_class = DSC$K_CLASS_D; resultspec.dsc$a_pointer = NULL; filespec.dsc$w_length = strlen (item); /* * Only return version specs, if the caller specified a version */ had_version = strchr (item, ';'); /* * Only return full path if the caller specified a path */ had_path = (strchr (item, ']') || strchr (item, ':')); while (1 == (1&lib$find_file (&filespec, &resultspec, &context, &defaultspec, 0, &status_value, &0))) { char *string; char *c; if (NULL == (string = calloc (1, resultspec.dsc$w_length+1))) { errno = ENOMEM; perror (""); exit (EXIT_ERR); } strncpy (string, resultspec.dsc$a_pointer, resultspec.dsc$w_length); string [resultspec.dsc$w_length] = '\0'; if (NULL == had_version) *((char *) strrchr (string, ';')) = '\0'; if (!had_path) { char *s = strrchr (string, ']'); if ( s == NULL ) s = strrchr (string, ':'); if ( s != NULL ) strcpy (string, s+1); } /* * Be consistent with what the C RTL has already done to the rest of * the argv items and lowercase all of these names. 
*/ for (c = string; *c; ++c) if (isupper (*c)) *c = tolower (*c); add_item (head, tail, string, count); ++expcount; } if (expcount == 0) add_item (head, tail, item, count); lib$sfree1_dd (&resultspec); lib$find_file_end (&context); } static int child_st [2]; /* Event Flag set when child process completes */ static short child_chan;/* I/O Channel for Pipe Mailbox */ static exit_handler (status) int *status; { short iosb [4]; if (0 == child_st [0]) { #ifdef DEBUG fprintf (stderr, "Waiting for Child Process to Finnish . . .\n"); #endif sys$qiow (0, child_chan, IO$_WRITEOF, iosb, 0, 0, 0, 0, 0, 0, 0, 0); sys$dassgn (child_chan); fclose (stdout); sys$synch (0, child_st); } } static sig_child (chan) int chan; { #ifdef DEBUG fprintf (stderr, "Child Completion AST\n"); #endif if (child_st [0] == 0) child_st [0] = 1; } static struct exit_control_block { struct exit_control_block *flink; int (*exit_routine) (); int arg_count; int *status_address; int exit_status; } exit_block = { 0, exit_handler, 1, &exit_block.exit_status, 0 }; static char *pipe_and_fork (cmd) char *cmd; { $DESCRIPTOR (cmddsc, cmd); static char mbxname [64]; $DESCRIPTOR (mbxdsc, mbxname); short iosb [4]; int status; int pid; struct { short dna_buflen; short dna_itmcod; char *dna_buffer; unsigned short *dna_retlen; int listend; } itmlst = { sizeof (mbxname), DVI$_DEVNAM, mbxname, &mbxdsc.dsc$w_length, 0 }; int mbxsize; struct { short mbf_buflen; short mbf_itmcod; int *mbf_maxbuf; unsigned short *mbf_retlen; int listend; } syiitmlst = { sizeof (mbxsize), SYI$_MAXBUF, &mbxsize, 0, 0 }; cmddsc.dsc$w_length = strlen (cmd); /* * Get the SYSGEN parameter MAXBUF, and the smaller of it and 2048 as * the size of the 'pipe' mailbox. 
*/ if (1 == (1& (vaxc$errno = sys$getsyiw (0, 0, 0, &syiitmlst, iosb, 0, 0, 0)))) vaxc$errno = iosb [0]; if (0 == (1&vaxc$errno)) { errno = EVMSERR; perror ("Can't get SYSGEN parameter value for MAXBUF"); exit (EXIT_ERR); } if (mbxsize > 2048) mbxsize = 2048; if (0 == (1& (vaxc$errno = sys$crembx (0, &child_chan, mbxsize, mbxsize, 0, 0, 0)))) { errno = EVMSERR; perror ("Can't create pipe mailbox"); exit (EXIT_ERR); } if (1 == (1& (vaxc$errno = sys$getdviw (0, child_chan, 0, &itmlst, iosb, 0, 0, 0)))) vaxc$errno = iosb [0]; if (0 == (1&vaxc$errno)) { errno = EVMSERR; perror ("Can't get pipe mailbox device name"); exit (EXIT_ERR); } mbxname [mbxdsc.dsc$w_length] = '\0'; #ifdef DEBUG fprintf (stderr, "Pipe Mailbox Name = '%s'\n", mbxname); #endif if (0 == (1& (vaxc$errno = lib$spawn (&cmddsc, &mbxdsc, 0, &1, 0, &pid, child_st, &0, sig_child, &child_chan)))) { errno = EVMSERR; perror ("Can't spawn subprocess"); exit (EXIT_ERR); } #ifdef DEBUG fprintf (stderr, "Subprocess's Pid = %08X\n", pid); #endif sys$dclexh (&exit_block); return (mbxname); } background_process (argc, argv) int argc; char **argv; { char command [2048] = "$"; $DESCRIPTOR (value, command); $DESCRIPTOR (cmd, "BACKGROUND$COMMAND"); $DESCRIPTOR (null, "NLA0:"); int pid; strcat (command, argv [0]); while (--argc) { strcat (command, " \""); strcat (command, *(++argv)); strcat (command, "\""); } value.dsc$w_length = strlen (command); if (0 == (1& (vaxc$errno = lib$set_symbol (&cmd, &value)))) { errno = EVMSERR; perror ("Can't create symbol for subprocess command"); exit (EXIT_ERR); } if (0 == (1& (vaxc$errno = lib$spawn (&cmd, &null, 0, &17, 0, &pid)))) { errno = EVMSERR; perror ("Can't spawn subprocess"); exit (EXIT_ERR); } #ifdef DEBUG fprintf (stderr, "%s\n", command); #endif fprintf (stderr, "%08X\n", pid); return (EXIT_OK); } /* vi:set tabstop=4 shiftwidth=4: */
gpl-2.0
DSMan195276/protura-gcc
libgcc/config/libbid/bid128_to_int32.c
48
132857
/* Copyright (C) 2007-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "bid_internal.h" /***************************************************************************** * BID128_to_int32_rnint ****************************************************************************/ BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, bid128_to_int32_rnint, x) int res; UINT64 x_sign; UINT64 x_exp; int exp; // unbiased exponent // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64) UINT64 tmp64; BID_UI64DOUBLE tmp1; unsigned int x_nr_bits; int q, ind, shift; UINT128 C1, C; UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits UINT256 fstar; UINT256 P256; // unpack x x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions C1.w[1] = x.w[1] & MASK_COEFF; C1.w[0] = x.w[0]; // check for NaN or Infinity if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) { // x is special if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN // set invalid flag *pfpsf |= 
INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } else { // x is QNaN // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } BID_RETURN (res); } else { // x is not a NaN, so it must be infinity if (!x_sign) { // x is +inf // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } else { // x is -inf // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } BID_RETURN (res); } } // check for non-canonical values (after the check for special values) if ((C1.w[1] > 0x0001ed09bead87c0ull) || (C1.w[1] == 0x0001ed09bead87c0ull && (C1.w[0] > 0x378d8e63ffffffffull)) || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) { res = 0x00000000; BID_RETURN (res); } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) { // x is 0 res = 0x00000000; BID_RETURN (res); } else { // x is not special and is not zero // q = nr. of decimal digits in x // determine first the nr. of bits in x if (C1.w[1] == 0) { if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53 // split the 64-bit value in two 32-bit halves to avoid rounding errors if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion x_nr_bits = 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } else { // x < 2^32 tmp1.d = (double) (C1.w[0]); // exact conversion x_nr_bits = 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } } else { // if x < 2^53 tmp1.d = (double) C1.w[0]; // exact conversion x_nr_bits = 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } } else { // C1.w[1] != 0 => nr. 
bits = 64 + nr_bits (C1.w[1]) tmp1.d = (double) C1.w[1]; // exact conversion x_nr_bits = 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } q = nr_digits[x_nr_bits - 1].digits; if (q == 0) { q = nr_digits[x_nr_bits - 1].digits1; if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo)) q++; } exp = (x_exp >> 49) - 6176; if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits) // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1) // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2... // so x rounded to an integer may or may not fit in a signed 32-bit int // the cases that do not fit are identified here; the ones that fit // fall through and will be handled with other cases further, // under '1 <= q + exp <= 10' if (x_sign) { // if n < 0 and q + exp = 10 // if n < -2^31 - 1/2 then n is too large // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31+1/2 // <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005, 1<=q<=34 if (q <= 11) { tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits) if (tmp64 > 0x500000005ull) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } else { // if (q > 11), i.e. 
12 <= q <= 34 and so -24 <= exp <= -2 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005 <=> // C > 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23 // (scale 2^31+1/2 up) tmp64 = 0x500000005ull; if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]); } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]); } if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } } else { // if n > 0 and q + exp = 10 // if n >= 2^31 - 1/2 then n is too large // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34 if (q <= 11) { tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits) if (tmp64 >= 0x4fffffffbull) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } else { // if (q > 11), i.e. 
12 <= q <= 34 and so -24 <= exp <= -2 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=> // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23 // (scale 2^31-1/2 up) tmp64 = 0x4fffffffbull; if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]); } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]); } if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } } } // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2 // Note: some of the cases tested for above fall through to this point if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1) // return 0 res = 0x00000000; BID_RETURN (res); } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1) // if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1) // res = 0 // else // res = +/-1 ind = q - 1; if (ind <= 18) { // 0 <= ind <= 18 if ((C1.w[1] == 0) && (C1.w[0] <= midpoint64[ind])) { res = 0x00000000; // return 0 } else if (x_sign) { // n < 0 res = 0xffffffff; // return -1 } else { // n > 0 res = 0x00000001; // return +1 } } else { // 19 <= ind <= 33 if ((C1.w[1] < midpoint128[ind - 19].w[1]) || ((C1.w[1] == midpoint128[ind - 19].w[1]) && (C1.w[0] <= midpoint128[ind - 19].w[0]))) { res = 0x00000000; // return 0 } else if (x_sign) { // n < 0 res = 0xffffffff; // return -1 } else { // n > 0 res = 0x00000001; // return +1 } } } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9) // -2^31-1/2 <= x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded // to nearest to a 32-bit signed integer if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x' // chop off ind digits from 
the lower part of C1 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits tmp64 = C1.w[0]; if (ind <= 19) { C1.w[0] = C1.w[0] + midpoint64[ind - 1]; } else { C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0]; C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1]; } if (C1.w[0] < tmp64) C1.w[1]++; // calculate C* and f* // C* is actually floor(C*) in this case // C* and f* need shifting and masking, as shown by // shiftright128[] and maskhigh128[] // 1 <= x <= 33 // kx = 10^(-x) = ten2mk128[ind - 1] // C* = (C1 + 1/2 * 10^x) * 10^(-x) // the approximation of 10^(-x) was rounded up to 118 bits __mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]); if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21 Cstar.w[1] = P256.w[3]; Cstar.w[0] = P256.w[2]; fstar.w[3] = 0; fstar.w[2] = P256.w[2] & maskhigh128[ind - 1]; fstar.w[1] = P256.w[1]; fstar.w[0] = P256.w[0]; } else { // 22 <= ind - 1 <= 33 Cstar.w[1] = 0; Cstar.w[0] = P256.w[3]; fstar.w[3] = P256.w[3] & maskhigh128[ind - 1]; fstar.w[2] = P256.w[2]; fstar.w[1] = P256.w[1]; fstar.w[0] = P256.w[0]; } // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g. // if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999 // if (0 < f* < 10^(-x)) then the result is a midpoint // if floor(C*) is even then C* = floor(C*) - logical right // shift; C* has p decimal digits, correct by Prop. 1) // else if floor(C*) is odd C* = floor(C*)-1 (logical right // shift; C* has p decimal digits, correct by Pr. 1) // else // C* = floor(C*) (logical right shift; C has p decimal digits, // correct by Property 1) // n = C* * 10^(e+x) // shift right C* by Ex-128 = shiftright128[ind] shift = shiftright128[ind - 1]; // 0 <= shift <= 102 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21 Cstar.w[0] = (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift)); // redundant, it will be 0! 
Cstar.w[1] = (Cstar.w[1] >> shift);
      } else {	// 22 <= ind - 1 <= 33
	Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
      }
      // if the result was a midpoint it was rounded away from zero, so
      // it will need a correction
      // check for midpoints
      if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	  && (fstar.w[1] || fstar.w[0])
	  && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
	      || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		  && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	// the result is a midpoint; round to nearest (even)
	if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	  // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	  Cstar.w[0]--;	// Cstar.w[0] is now even
	}	// else MP in [ODD, EVEN]
      }
      if (x_sign)
	res = -Cstar.w[0];
      else
	res = Cstar.w[0];
    } else if (exp == 0) {
      // 1 <= q <= 10
      // res = +/-C (exact)
      if (x_sign)
	res = -C1.w[0];
      else
	res = C1.w[0];
    } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
      // res = +/-C * 10^exp (exact)
      if (x_sign)
	res = -C1.w[0] * ten2k64[exp];
      else
	res = C1.w[0] * ten2k64[exp];
    }
  }
}
BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_xrnint
 *
 *  (review note) Convert a BID128 decimal value to a signed 32-bit integer,
 *  rounding to nearest with ties to even.  Identical to the non-'x' variant
 *  except that it also signals INEXACT_EXCEPTION whenever the value is not
 *  exactly an integer.  NaN, infinity, and out-of-range values set
 *  INVALID_EXCEPTION and return the Integer Indefinite value 0x80000000.
 ****************************************************************************/

BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, bid128_to_int32_xrnint, x)

  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;	// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;	// C* represents up to 34 decimal digits ~ 113 bits
  UINT256 fstar;
  UINT256 P256;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    // non-canonical encodings are treated as zero
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      // bit count alone was inconclusive; compare against the threshold
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n < -2^31 - 1/2 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31+1/2
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 > 0x500000005ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000005 <=>
	  // C > 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31+1/2 up)
	  tmp64 = 0x500000005ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n >= 2^31 - 1/2 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x4fffffffbull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=>
	  // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31-1/2 up)
	  tmp64 = 0x4fffffffbull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) < 0) {	// n = +/-0.0...c(0)c(1)...c(q-1)
      // set inexact flag
      *pfpsf |= INEXACT_EXCEPTION;
      // return 0
      res = 0x00000000;
      BID_RETURN (res);
    } else if ((q + exp) == 0) {	// n = +/-0.c(0)c(1)...c(q-1)
      // if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
      //   res = 0
      // else
      //   res = +/-1
      ind = q - 1;
      if (ind <= 18) {	// 0 <= ind <= 18
	if ((C1.w[1] == 0) && (C1.w[0] <= midpoint64[ind])) {
	  res = 0x00000000;	// return 0
	} else if (x_sign) {	// n < 0
	  res = 0xffffffff;	// return -1
	} else {	// n > 0
	  res = 0x00000001;	// return +1
	}
      } else {	// 19 <= ind <= 33
	if ((C1.w[1] < midpoint128[ind - 19].w[1])
	    || ((C1.w[1] == midpoint128[ind - 19].w[1])
		&& (C1.w[0] <= midpoint128[ind - 19].w[0]))) {
	  res = 0x00000000;	// return 0
	} else if (x_sign) {	// n < 0
	  res = 0xffffffff;	// return -1
	} else {	// n > 0
	  res = 0x00000001;	// return +1
	}
      }
      // set inexact flag
      *pfpsf |= INEXACT_EXCEPTION;
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31-1/2 <= x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded
      // to nearest to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)
	  C1.w[1]++;	// propagate the carry into the high 64 bits
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	  fstar.w[3] = 0;
	  fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	  fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
	  fstar.w[2] = P256.w[2];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)
	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// determine inexactness of the rounding of C*
	// if (0 < f* - 1/2 < 10^(-x)) then
	//   the result is exact
	// else
	//   if (f* - 1/2 > T*) then
	//     the result is inexact
	if (ind - 1 <= 2) {
	  if (fstar.w[1] > 0x8000000000000000ull
	      || (fstar.w[1] == 0x8000000000000000ull
		  && fstar.w[0] > 0x0ull)) {
	    // f* > 1/2 and the result may be exact
	    tmp64 = fstar.w[1] - 0x8000000000000000ull;	// f* - 1/2
	    if (tmp64 > ten2mk128trunc[ind - 1].w[1]
		|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	  }
	} else if (ind - 1 <= 21) {	// if 3 <= ind <= 21
	  if (fstar.w[3] > 0x0
	      || (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1])
	      || (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1]
		  && (fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[2] - onehalf128[ind - 1];
	    tmp64A = fstar.w[3];
	    if (tmp64 > fstar.w[2])
	      tmp64A--;	// borrow out of the high word
	    if (tmp64A || tmp64
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	  }
	} else {	// if 22 <= ind <= 33
	  if (fstar.w[3] > onehalf128[ind - 1]
	      || (fstar.w[3] == onehalf128[ind - 1]
		  && (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[3] - onehalf128[ind - 1];
	    if (tmp64 || fstar.w[2]
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	  }
	}
	// if the result was a midpoint it was rounded away from zero, so
	// it will need a correction
	// check for midpoints
	if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	    && (fstar.w[1] || fstar.w[0])
	    && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	  // the result is a midpoint; round to nearest (even)
	  if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	    // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	    Cstar.w[0]--;	// Cstar.w[0] is now even
	  }	// else MP in [ODD, EVEN]
	}
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
      } else if (exp == 0) {
	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_floor
 *
 *  (review note) Convert a BID128 decimal value to a signed 32-bit integer,
 *  rounding toward negative infinity.  This variant is exception-quiet for
 *  inexact results (no INEXACT_EXCEPTION is raised); NaN, infinity, and
 *  out-of-range values set INVALID_EXCEPTION and return 0x80000000.
 ****************************************************************************/

BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, bid128_to_int32_floor, x)

  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;	// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;	// C* represents up to 34 decimal digits ~ 113 bits
  UINT256 fstar;
  UINT256 P256;
  // rounding classification flags used by the final toward--inf correction
  int is_inexact_lt_midpoint = 0;
  int is_inexact_gt_midpoint = 0;
  int is_midpoint_lt_even = 0;
  int is_midpoint_gt_even = 0;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    // non-canonical encodings are treated as zero
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      // bit count alone was inconclusive; compare against the threshold
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n < -2^31 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 > 0x500000000ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000 <=>
	  // C > 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31 up)
	  tmp64 = 0x500000000ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n >= 2^31 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x500000000ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
	  // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31 up)
	  tmp64 = 0x500000000ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31 <= n < 2^31
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) <= 0) {
      // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
      // return 0 for positive n, or -1 for negative n (floor of a value
      // in (-1, 0) is -1, since rounding is toward negative infinity)
      if (x_sign)
	res = 0xffffffff;
      else
	res = 0x00000000;
      BID_RETURN (res);
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31 <= x <= -1 or 1 <= x < 2^31 so x can be rounded
      // toward negative infinity to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)
	  C1.w[1]++;	// propagate the carry into the high 64 bits
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	  fstar.w[3] = 0;
	  fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	  fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
	  fstar.w[2] = P256.w[2];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)
	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// determine inexactness of the rounding of C*
	// if (0 < f* - 1/2 < 10^(-x)) then
	//   the result is exact
	// else
	//   if (f* - 1/2 > T*) then
	//     the result is inexact
	// (recorded in the is_inexact_* flags; floor() raises no exception)
	if (ind - 1 <= 2) {
	  if (fstar.w[1] > 0x8000000000000000ull
	      || (fstar.w[1] == 0x8000000000000000ull
		  && fstar.w[0] > 0x0ull)) {
	    // f* > 1/2 and the result may be exact
	    tmp64 = fstar.w[1] - 0x8000000000000000ull;	// f* - 1/2
	    if (tmp64 > ten2mk128trunc[ind - 1].w[1]
		|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	} else if (ind - 1 <= 21) {	// if 3 <= ind <= 21
	  if (fstar.w[3] > 0x0
	      || (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1])
	      || (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1]
		  && (fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[2] - onehalf128[ind - 1];
	    tmp64A = fstar.w[3];
	    if (tmp64 > fstar.w[2])
	      tmp64A--;	// borrow out of the high word
	    if (tmp64A || tmp64
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	} else {	// if 22 <= ind <= 33
	  if (fstar.w[3] > onehalf128[ind - 1]
	      || (fstar.w[3] == onehalf128[ind - 1]
		  && (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[3] - onehalf128[ind - 1];
	    if (tmp64 || fstar.w[2]
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	}
	// if the result was a midpoint it was rounded away from zero, so
	// it will need a correction
	// check for midpoints
	if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	    && (fstar.w[1] || fstar.w[0])
	    && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	  // the result is a midpoint; round to nearest
	  if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	    // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	    Cstar.w[0]--;	// Cstar.w[0] is now even
	    is_midpoint_gt_even = 1;
	    is_inexact_lt_midpoint = 0;
	    is_inexact_gt_midpoint = 0;
	  } else {	// else MP in [ODD, EVEN]
	    is_midpoint_lt_even = 1;
	    is_inexact_lt_midpoint = 0;
	    is_inexact_gt_midpoint = 0;
	  }
	}
	// general correction for RM (undo round-to-nearest, apply floor)
	if (x_sign && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
	  Cstar.w[0] = Cstar.w[0] + 1;
	} else if (!x_sign
		   && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
	  Cstar.w[0] = Cstar.w[0] - 1;
	} else {
	  ;	// the result is already correct
	}
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
      } else if (exp == 0) {
	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_xfloor
 ****************************************************************************/

BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, bid128_to_int32_xfloor, x)

  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;	// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;	// C* represents up to 34 decimal digits ~ 113 bits
  UINT256
fstar; UINT256 P256; int is_inexact_lt_midpoint = 0; int is_inexact_gt_midpoint = 0; int is_midpoint_lt_even = 0; int is_midpoint_gt_even = 0; // unpack x x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions C1.w[1] = x.w[1] & MASK_COEFF; C1.w[0] = x.w[0]; // check for NaN or Infinity if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) { // x is special if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } else { // x is QNaN // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } BID_RETURN (res); } else { // x is not a NaN, so it must be infinity if (!x_sign) { // x is +inf // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } else { // x is -inf // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } BID_RETURN (res); } } // check for non-canonical values (after the check for special values) if ((C1.w[1] > 0x0001ed09bead87c0ull) || (C1.w[1] == 0x0001ed09bead87c0ull && (C1.w[0] > 0x378d8e63ffffffffull)) || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) { res = 0x00000000; BID_RETURN (res); } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) { // x is 0 res = 0x00000000; BID_RETURN (res); } else { // x is not special and is not zero // q = nr. of decimal digits in x // determine first the nr. 
of bits in x if (C1.w[1] == 0) { if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53 // split the 64-bit value in two 32-bit halves to avoid rounding errors if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion x_nr_bits = 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } else { // x < 2^32 tmp1.d = (double) (C1.w[0]); // exact conversion x_nr_bits = 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } } else { // if x < 2^53 tmp1.d = (double) C1.w[0]; // exact conversion x_nr_bits = 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1]) tmp1.d = (double) C1.w[1]; // exact conversion x_nr_bits = 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } q = nr_digits[x_nr_bits - 1].digits; if (q == 0) { q = nr_digits[x_nr_bits - 1].digits1; if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo)) q++; } exp = (x_exp >> 49) - 6176; if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits) // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1) // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2... 
// so x rounded to an integer may or may not fit in a signed 32-bit int // the cases that do not fit are identified here; the ones that fit // fall through and will be handled with other cases further, // under '1 <= q + exp <= 10' if (x_sign) { // if n < 0 and q + exp = 10 // if n < -2^31 then n is too large // too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31 // <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000, 1<=q<=34 if (q <= 11) { tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits) if (tmp64 > 0x500000000ull) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2 // 0.c(0)c(1)...c(q-1) * 10^11 > 0x500000000 <=> // C > 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23 // (scale 2^31 up) tmp64 = 0x500000000ull; if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]); } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]); } if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } } else { // if n > 0 and q + exp = 10 // if n >= 2^31 then n is too large // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34 if (q <= 11) { tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits) if (tmp64 >= 0x500000000ull) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; 
BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=> // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23 // (scale 2^31 up) tmp64 = 0x500000000ull; if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]); } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]); } if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } } } // n is not too large to be converted to int32: -2^31 <= n < 2^31 // Note: some of the cases tested for above fall through to this point if ((q + exp) <= 0) { // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1) // set inexact flag *pfpsf |= INEXACT_EXCEPTION; // return 0 if (x_sign) res = 0xffffffff; else res = 0x00000000; BID_RETURN (res); } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9) // -2^31 <= x <= -1 or 1 <= x < 2^31 so x can be rounded // toward negative infinity to a 32-bit signed integer if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x' // chop off ind digits from the lower part of C1 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits tmp64 = C1.w[0]; if (ind <= 19) { C1.w[0] = C1.w[0] + midpoint64[ind - 1]; } else { C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0]; C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1]; } if (C1.w[0] < tmp64) C1.w[1]++; // calculate C* and f* // C* is actually floor(C*) in this case // C* and f* need shifting and masking, as shown by // shiftright128[] and maskhigh128[] // 1 <= x <= 33 
// kx = 10^(-x) = ten2mk128[ind - 1] // C* = (C1 + 1/2 * 10^x) * 10^(-x) // the approximation of 10^(-x) was rounded up to 118 bits __mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]); if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21 Cstar.w[1] = P256.w[3]; Cstar.w[0] = P256.w[2]; fstar.w[3] = 0; fstar.w[2] = P256.w[2] & maskhigh128[ind - 1]; fstar.w[1] = P256.w[1]; fstar.w[0] = P256.w[0]; } else { // 22 <= ind - 1 <= 33 Cstar.w[1] = 0; Cstar.w[0] = P256.w[3]; fstar.w[3] = P256.w[3] & maskhigh128[ind - 1]; fstar.w[2] = P256.w[2]; fstar.w[1] = P256.w[1]; fstar.w[0] = P256.w[0]; } // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g. // if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999 // if (0 < f* < 10^(-x)) then the result is a midpoint // if floor(C*) is even then C* = floor(C*) - logical right // shift; C* has p decimal digits, correct by Prop. 1) // else if floor(C*) is odd C* = floor(C*)-1 (logical right // shift; C* has p decimal digits, correct by Pr. 1) // else // C* = floor(C*) (logical right shift; C has p decimal digits, // correct by Property 1) // n = C* * 10^(e+x) // shift right C* by Ex-128 = shiftright128[ind] shift = shiftright128[ind - 1]; // 0 <= shift <= 102 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21 Cstar.w[0] = (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift)); // redundant, it will be 0! 
Cstar.w[1] = (Cstar.w[1] >> shift); } else { // 22 <= ind - 1 <= 33 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38 } // determine inexactness of the rounding of C* // if (0 < f* - 1/2 < 10^(-x)) then // the result is exact // else // if (f* - 1/2 > T*) then // the result is inexact if (ind - 1 <= 2) { if (fstar.w[1] > 0x8000000000000000ull || (fstar.w[1] == 0x8000000000000000ull && fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2 if (tmp64 > ten2mk128trunc[ind - 1].w[1] || (tmp64 == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) { // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; is_inexact_lt_midpoint = 1; } // else the result is exact } else { // the result is inexact; f2* <= 1/2 // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; is_inexact_gt_midpoint = 1; } } else if (ind - 1 <= 21) { // if 3 <= ind <= 21 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) || (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] && (fstar.w[1] || fstar.w[0]))) { // f2* > 1/2 and the result may be exact // Calculate f2* - 1/2 tmp64 = fstar.w[2] - onehalf128[ind - 1]; tmp64A = fstar.w[3]; if (tmp64 > fstar.w[2]) tmp64A--; if (tmp64A || tmp64 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1] || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) { // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; is_inexact_lt_midpoint = 1; } // else the result is exact } else { // the result is inexact; f2* <= 1/2 // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; is_inexact_gt_midpoint = 1; } } else { // if 22 <= ind <= 33 if (fstar.w[3] > onehalf128[ind - 1] || (fstar.w[3] == onehalf128[ind - 1] && (fstar.w[2] || fstar.w[1] || fstar.w[0]))) { // f2* > 1/2 and the result may be exact // Calculate f2* - 1/2 tmp64 = fstar.w[3] - onehalf128[ind - 1]; if (tmp64 || fstar.w[2] || fstar.w[1] > 
ten2mk128trunc[ind - 1].w[1] || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) { // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; is_inexact_lt_midpoint = 1; } // else the result is exact } else { // the result is inexact; f2* <= 1/2 // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; is_inexact_gt_midpoint = 1; } } // if the result was a midpoint it was rounded away from zero, so // it will need a correction // check for midpoints if ((fstar.w[3] == 0) && (fstar.w[2] == 0) && (fstar.w[1] || fstar.w[0]) && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1] || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) { // the result is a midpoint; round to nearest if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD] // if floor(C*) is odd C = floor(C*) - 1; the result >= 1 Cstar.w[0]--; // Cstar.w[0] is now even is_midpoint_gt_even = 1; is_inexact_lt_midpoint = 0; is_inexact_gt_midpoint = 0; } else { // else MP in [ODD, EVEN] is_midpoint_lt_even = 1; is_inexact_lt_midpoint = 0; is_inexact_gt_midpoint = 0; } } // general correction for RM if (x_sign && (is_midpoint_gt_even || is_inexact_lt_midpoint)) { Cstar.w[0] = Cstar.w[0] + 1; } else if (!x_sign && (is_midpoint_lt_even || is_inexact_gt_midpoint)) { Cstar.w[0] = Cstar.w[0] - 1; } else { ; // the result is already correct } if (x_sign) res = -Cstar.w[0]; else res = Cstar.w[0]; } else if (exp == 0) { // 1 <= q <= 10 // res = +/-C (exact) if (x_sign) res = -C1.w[0]; else res = C1.w[0]; } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10 // res = +/-C * 10^exp (exact) if (x_sign) res = -C1.w[0] * ten2k64[exp]; else res = C1.w[0] * ten2k64[exp]; } } } BID_RETURN (res); } /***************************************************************************** * BID128_to_int32_ceil ****************************************************************************/ BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, 
bid128_to_int32_ceil, x)

  // Convert the 128-bit decimal floating-point value x to a signed 32-bit
  // integer, rounding toward +infinity.  This variant does NOT signal the
  // inexact exception when digits are discarded (compare with
  // bid128_to_int32_xceil below, which does).  NaN, infinity, and values
  // outside the int32 range signal invalid and return the Integer
  // Indefinite value 0x80000000.
  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;			// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;		// C* represents up to 34 decimal digits ~ 113 bits
  UINT256 fstar;
  UINT256 P256;
  int is_inexact_lt_midpoint = 0;
  int is_inexact_gt_midpoint = 0;
  int is_midpoint_lt_even = 0;
  int is_midpoint_gt_even = 0;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  // non-canonical encodings are treated as zero, so the result is 0
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    // map the bit count to a decimal digit count via the nr_digits table;
    // when the count is ambiguous (digits == 0) compare against thresholds
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n <= -2^31-1 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x50000000aull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
	  // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31+1 up)
	  tmp64 = 0x50000000aull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n > 2^31 - 1 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31 - 1
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 > 0x4fffffff6ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6 <=>
	  // C > 0x4fffffff6 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31 up)
	  tmp64 = 0x4fffffff6ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31-1 < n <= 2^31-1
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) <= 0) {
      // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
      // ceiling of a pure fraction: 0 for negative x, 1 for positive x
      // return 0
      if (x_sign)
	res = 0x00000000;
      else
	res = 0x00000001;
      BID_RETURN (res);
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31-1 < x <= -1 or 1 <= x <= 2^31-1 so x can be rounded
      // toward positive infinity to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)	// carry from the low 64-bit word
	  C1.w[1]++;
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	  fstar.w[3] = 0;
	  fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	  fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
	  fstar.w[2] = P256.w[2];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)

	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// determine inexactness of the rounding of C*
	// (record it in the is_inexact_* flags; this non-signaling variant
	// does not raise the inexact exception)
	// if (0 < f* - 1/2 < 10^(-x)) then
	//   the result is exact
	// else // if (f* - 1/2 > T*) then
	//   the result is inexact
	if (ind - 1 <= 2) {
	  if (fstar.w[1] > 0x8000000000000000ull ||
	      (fstar.w[1] == 0x8000000000000000ull
	       && fstar.w[0] > 0x0ull)) {
	    // f* > 1/2 and the result may be exact
	    tmp64 = fstar.w[1] - 0x8000000000000000ull;	// f* - 1/2
	    if (tmp64 > ten2mk128trunc[ind - 1].w[1]
		|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	} else if (ind - 1 <= 21) {	// if 3 <= ind <= 21
	  if (fstar.w[3] > 0x0 ||
	      (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
	      (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
	       (fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[2] - onehalf128[ind - 1];
	    tmp64A = fstar.w[3];
	    if (tmp64 > fstar.w[2])	// borrow out of the high word
	      tmp64A--;
	    if (tmp64A || tmp64
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	} else {	// if 22 <= ind <= 33
	  if (fstar.w[3] > onehalf128[ind - 1] ||
	      (fstar.w[3] == onehalf128[ind - 1] &&
	       (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[3] - onehalf128[ind - 1];
	    if (tmp64 || fstar.w[2]
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	}

	// if the result was a midpoint it was rounded away from zero, so
	// it will need a correction
	// check for midpoints
	if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	    && (fstar.w[1] || fstar.w[0])
	    && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	  // the result is a midpoint; round to nearest
	  if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	    // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	    Cstar.w[0]--;	// Cstar.w[0] is now even
	    is_midpoint_gt_even = 1;
	    is_inexact_lt_midpoint = 0;
	    is_inexact_gt_midpoint = 0;
	  } else {	// else MP in [ODD, EVEN]
	    is_midpoint_lt_even = 1;
	    is_inexact_lt_midpoint = 0;
	    is_inexact_gt_midpoint = 0;
	  }
	}
	// general correction for RM (round up in magnitude terms: a negative
	// result rounded away from -inf loses 1, a positive one gains 1)
	if (x_sign && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
	  Cstar.w[0] = Cstar.w[0] - 1;
	} else if (!x_sign
		   && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
	  Cstar.w[0] = Cstar.w[0] + 1;
	} else {
	  ;	// the result is already correct
	}
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
      } else if (exp == 0) {
	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_xceil
 ****************************************************************************/

BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, bid128_to_int32_xceil, x)

  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;			// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;		// C* represents up to 34 decimal digits ~ 113 bits
  UINT256
fstar;
  UINT256 P256;
  int is_inexact_lt_midpoint = 0;
  int is_inexact_gt_midpoint = 0;
  int is_midpoint_lt_even = 0;
  int is_midpoint_gt_even = 0;

  // Convert the 128-bit decimal floating-point value x to a signed 32-bit
  // integer, rounding toward +infinity, and signal the inexact exception
  // whenever digits are discarded; this is the only difference from
  // bid128_to_int32_ceil.  NaN, infinity, and out-of-range values signal
  // invalid and return the Integer Indefinite value 0x80000000.

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  // non-canonical encodings are treated as zero, so the result is 0
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    // map the bit count to a decimal digit count via the nr_digits table;
    // when the count is ambiguous (digits == 0) compare against thresholds
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n <= -2^31-1 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x50000000aull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
	  // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31+1 up)
	  tmp64 = 0x50000000aull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n > 2^31 - 1 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^31 - 1
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 > 0x4fffffff6ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 > 0x4fffffff6 <=>
	  // C > 0x4fffffff6 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31 up)
	  tmp64 = 0x4fffffff6ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31-1 < n <= 2^31-1
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) <= 0) {
      // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
      // ceiling of a pure fraction: 0 for negative x, 1 for positive x
      // set inexact flag
      *pfpsf |= INEXACT_EXCEPTION;
      // return 0
      if (x_sign)
	res = 0x00000000;
      else
	res = 0x00000001;
      BID_RETURN (res);
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31-1 < x <= -1 or 1 <= x <= 2^31-1 so x can be rounded
      // toward positive infinity to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)	// carry from the low 64-bit word
	  C1.w[1]++;
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	  fstar.w[3] = 0;
	  fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	  fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
	  fstar.w[2] = P256.w[2];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)

	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// determine inexactness of the rounding of C*
	// (unlike bid128_to_int32_ceil, the inexact exception is signaled
	// here as soon as inexactness is detected)
	// if (0 < f* - 1/2 < 10^(-x)) then
	//   the result is exact
	// else // if (f* - 1/2 > T*) then
	//   the result is inexact
	if (ind - 1 <= 2) {
	  if (fstar.w[1] > 0x8000000000000000ull ||
	      (fstar.w[1] == 0x8000000000000000ull
	       && fstar.w[0] > 0x0ull)) {
	    // f* > 1/2 and the result may be exact
	    tmp64 = fstar.w[1] - 0x8000000000000000ull;	// f* - 1/2
	    if (tmp64 > ten2mk128trunc[ind - 1].w[1]
		|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	    is_inexact_gt_midpoint = 1;
	  }
	} else if (ind - 1 <= 21) {	// if 3 <= ind <= 21
	  if (fstar.w[3] > 0x0 ||
	      (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
	      (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
	       (fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[2] - onehalf128[ind - 1];
	    tmp64A = fstar.w[3];
	    if (tmp64 > fstar.w[2])	// borrow out of the high word
	      tmp64A--;
	    if (tmp64A || tmp64
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	    is_inexact_gt_midpoint = 1;
	  }
	} else {	// if 22 <= ind <= 33
	  if (fstar.w[3] > onehalf128[ind - 1] ||
	      (fstar.w[3] == onehalf128[ind - 1] &&
	       (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[3] - onehalf128[ind - 1];
	    if (tmp64 || fstar.w[2]
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	      is_inexact_lt_midpoint = 1;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	    is_inexact_gt_midpoint = 1;
	  }
	}

	// if the result was a midpoint it was rounded away from zero, so
	// it will need a correction
	// check for midpoints
	if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	    && (fstar.w[1] || fstar.w[0])
	    && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	  // the result is a midpoint; round to nearest
	  if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	    // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	    Cstar.w[0]--;	// Cstar.w[0] is now even
	    is_midpoint_gt_even = 1;
	    is_inexact_lt_midpoint = 0;
	    is_inexact_gt_midpoint = 0;
	  } else {	// else MP in [ODD, EVEN]
	    is_midpoint_lt_even = 1;
	    is_inexact_lt_midpoint = 0;
	    is_inexact_gt_midpoint = 0;
	  }
	}
	// general correction for RM (round up in magnitude terms: a negative
	// result rounded away from -inf loses 1, a positive one gains 1)
	if (x_sign && (is_midpoint_lt_even || is_inexact_gt_midpoint)) {
	  Cstar.w[0] = Cstar.w[0] - 1;
	} else if (!x_sign
		   && (is_midpoint_gt_even || is_inexact_lt_midpoint)) {
	  Cstar.w[0] = Cstar.w[0] + 1;
	} else {
	  ;	// the result is already correct
	}
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
      } else if (exp == 0) {
	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_int
 ****************************************************************************/

BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int,
bid128_to_int32_int, x)
  // bid128_to_int32_int: convert a BID128 decimal value to a signed 32-bit
  // integer, rounding toward zero ("general correction for RZ" below).
  // This *_int variant sets only the invalid flag (NaN/Inf/out-of-range);
  // it does NOT signal inexact (contrast with the *_xint variant).
  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;			// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;		// C* represents up to 34 decimal digits ~ 113 bits
  UINT256 fstar;
  UINT256 P256;
  int is_inexact_gt_midpoint = 0;
  int is_midpoint_lt_even = 0;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n <= -2^31 - 1 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x50000000aull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
	  // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31+1 up)
	  tmp64 = 0x50000000aull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n >= 2^31 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x500000000ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
	  // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31-1/2 up)
	  tmp64 = 0x500000000ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31 - 1 < n < 2^31
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) <= 0) {
      // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
      // return 0 (truncation toward zero; no inexact flag in this variant)
      res = 0x00000000;
      BID_RETURN (res);
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31-1 < x <= -1 or 1 <= x < 2^31 so x can be rounded
      // toward zero to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)
	  C1.w[1]++;	// carry from the low 64-bit add
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	  fstar.w[3] = 0;
	  fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	  fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
	  fstar.w[2] = P256.w[2];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)
	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// determine inexactness of the rounding of C*
	// if (0 < f* - 1/2 < 10^(-x)) then
	//   the result is exact
	// else // if (f* - 1/2 > T*) then
	//   the result is inexact
	// NOTE: in this no-flag variant the "inexact, f* > 1/2" branches are
	// intentionally empty; only the is_inexact_gt_midpoint indicator
	// (f* <= 1/2) is needed for the RZ correction below
	if (ind - 1 <= 2) {
	  if (fstar.w[1] > 0x8000000000000000ull ||
	      (fstar.w[1] == 0x8000000000000000ull
	       && fstar.w[0] > 0x0ull)) {
	    // f* > 1/2 and the result may be exact
	    tmp64 = fstar.w[1] - 0x8000000000000000ull;	// f* - 1/2
	    if ((tmp64 > ten2mk128trunc[ind - 1].w[1]
		 || (tmp64 == ten2mk128trunc[ind - 1].w[1]
		     && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0]))) {
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	} else if (ind - 1 <= 21) {	// if 3 <= ind <= 21
	  if (fstar.w[3] > 0x0 ||
	      (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
	      (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
	       (fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[2] - onehalf128[ind - 1];
	    tmp64A = fstar.w[3];
	    if (tmp64 > fstar.w[2])
	      tmp64A--;
	    if (tmp64A || tmp64
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	} else {	// if 22 <= ind <= 33
	  if (fstar.w[3] > onehalf128[ind - 1] ||
	      (fstar.w[3] == onehalf128[ind - 1] &&
	       (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[3] - onehalf128[ind - 1];
	    if (tmp64 || fstar.w[2]
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    is_inexact_gt_midpoint = 1;
	  }
	}
	// if the result was a midpoint it was rounded away from zero, so
	// it will need a correction
	// check for midpoints
	if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	    && (fstar.w[1] || fstar.w[0])
	    && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	  // the result is a midpoint; round to nearest
	  if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	    // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	    Cstar.w[0]--;	// Cstar.w[0] is now even
	    is_inexact_gt_midpoint = 0;
	  } else {	// else MP in [ODD, EVEN]
	    is_midpoint_lt_even = 1;
	    is_inexact_gt_midpoint = 0;
	  }
	}
	// general correction for RZ
	if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
	  Cstar.w[0] = Cstar.w[0] - 1;
	} else {
	  ;	// exact, the result is already correct
	}
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
      } else if (exp == 0) {	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_int
 ****************************************************************************/

/*****************************************************************************
 *  BID128_to_int32_xint
 ****************************************************************************/

// bid128_to_int32_xint: like bid128_to_int32_int (round toward zero) but
// also signals the inexact exception when the conversion is not exact.
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int,
					  bid128_to_int32_xint, x)
  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;			// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;		// C* represents up to 34 decimal digits ~ 113 bits
  UINT256 fstar;
  UINT256 P256;
  int is_inexact_gt_midpoint = 0;
  int is_midpoint_lt_even = 0;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n <= -2^31 - 1 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x50000000aull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x50000000a <=>
	  // C >= 0x50000000a * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31+1 up)
	  tmp64 = 0x50000000aull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n >= 2^31 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x500000000ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000000 <=>
	  // C >= 0x500000000 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31-1/2 up)
	  tmp64 = 0x500000000ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31 - 1 < n < 2^31
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) <= 0) {
      // n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
      // set inexact flag
      *pfpsf |= INEXACT_EXCEPTION;
      // return 0
      res = 0x00000000;
      BID_RETURN (res);
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31-1 < x <= -1 or 1 <= x < 2^31 so x can be rounded
      // toward zero to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)
	  C1.w[1]++;	// carry from the low 64-bit add
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	  fstar.w[3] = 0;
	  fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	  fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
	  fstar.w[2] = P256.w[2];
	  fstar.w[1] = P256.w[1];
	  fstar.w[0] = P256.w[0];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)
	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// determine inexactness of the rounding of C*
	// if (0 < f* - 1/2 < 10^(-x)) then
	//   the result is exact
	// else // if (f* - 1/2 > T*) then
	//   the result is inexact
	if (ind - 1 <= 2) {
	  if (fstar.w[1] > 0x8000000000000000ull ||
	      (fstar.w[1] == 0x8000000000000000ull
	       && fstar.w[0] > 0x0ull)) {
	    // f* > 1/2 and the result may be exact
	    tmp64 = fstar.w[1] - 0x8000000000000000ull;	// f* - 1/2
	    if (tmp64 > ten2mk128trunc[ind - 1].w[1]
		|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	    is_inexact_gt_midpoint = 1;
	  }
	} else if (ind - 1 <= 21) {	// if 3 <= ind <= 21
	  if (fstar.w[3] > 0x0 ||
	      (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
	      (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
	       (fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[2] - onehalf128[ind - 1];
	    tmp64A = fstar.w[3];
	    if (tmp64 > fstar.w[2])
	      tmp64A--;
	    if (tmp64A || tmp64
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	    is_inexact_gt_midpoint = 1;
	  }
	} else {	// if 22 <= ind <= 33
	  if (fstar.w[3] > onehalf128[ind - 1] ||
	      (fstar.w[3] == onehalf128[ind - 1] &&
	       (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
	    // f2* > 1/2 and the result may be exact
	    // Calculate f2* - 1/2
	    tmp64 = fstar.w[3] - onehalf128[ind - 1];
	    if (tmp64 || fstar.w[2]
		|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
	      // set the inexact flag
	      *pfpsf |= INEXACT_EXCEPTION;
	    }	// else the result is exact
	  } else {	// the result is inexact; f2* <= 1/2
	    // set the inexact flag
	    *pfpsf |= INEXACT_EXCEPTION;
	    is_inexact_gt_midpoint = 1;
	  }
	}
	// if the result was a midpoint it was rounded away from zero, so
	// it will need a correction
	// check for midpoints
	if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
	    && (fstar.w[1] || fstar.w[0])
	    && (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
		|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
		    && fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
	  // the result is a midpoint; round to nearest
	  if (Cstar.w[0] & 0x01) {	// Cstar.w[0] is odd; MP in [EVEN, ODD]
	    // if floor(C*) is odd C = floor(C*) - 1; the result >= 1
	    Cstar.w[0]--;	// Cstar.w[0] is now even
	    is_inexact_gt_midpoint = 0;
	  } else {	// else MP in [ODD, EVEN]
	    is_midpoint_lt_even = 1;
	    is_inexact_gt_midpoint = 0;
	  }
	}
	// general correction for RZ
	if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
	  Cstar.w[0] = Cstar.w[0] - 1;
	} else {
	  ;	// exact, the result is already correct
	}
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
      } else if (exp == 0) {	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_rninta
 ****************************************************************************/

// bid128_to_int32_rninta: convert BID128 to int32, rounding to nearest with
// ties away from zero; does not signal inexact (see the midpoint handling
// and the "no need to check for midpoints" note in the body).
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int,
					  bid128_to_int32_rninta, x)
  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;			// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;		// C* represents
// (comment continued) up to 34 decimal digits ~ 113 bits
  UINT256 P256;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is -inf
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    }
  }
  // check for non-canonical values (after the check for special values)
  if ((C1.w[1] > 0x0001ed09bead87c0ull)
      || (C1.w[1] == 0x0001ed09bead87c0ull
	  && (C1.w[0] > 0x378d8e63ffffffffull))
      || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
    res = 0x00000000;
    BID_RETURN (res);
  } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
    // x is 0
    res = 0x00000000;
    BID_RETURN (res);
  } else {	// x is not special and is not zero
    // q = nr. of decimal digits in x
    // determine first the nr. of bits in x
    if (C1.w[1] == 0) {
      if (C1.w[0] >= 0x0020000000000000ull) {	// x >= 2^53
	// split the 64-bit value in two 32-bit halves to avoid rounding errors
	if (C1.w[0] >= 0x0000000100000000ull) {	// x >= 2^32
	  tmp1.d = (double) (C1.w[0] >> 32);	// exact conversion
	  x_nr_bits =
	    33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	} else {	// x < 2^32
	  tmp1.d = (double) (C1.w[0]);	// exact conversion
	  x_nr_bits =
	    1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
	}
      } else {	// if x < 2^53
	tmp1.d = (double) C1.w[0];	// exact conversion
	x_nr_bits =
	  1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
      }
    } else {	// C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
      tmp1.d = (double) C1.w[1];	// exact conversion
      x_nr_bits =
	65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
    }
    q = nr_digits[x_nr_bits - 1].digits;
    if (q == 0) {
      q = nr_digits[x_nr_bits - 1].digits1;
      if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
	  || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
	      && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
	q++;
    }
    exp = (x_exp >> 49) - 6176;
    if ((q + exp) > 10) {	// x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
      // set invalid flag
      *pfpsf |= INVALID_EXCEPTION;
      // return Integer Indefinite
      res = 0x80000000;
      BID_RETURN (res);
    } else if ((q + exp) == 10) {	// x = c(0)c(1)...c(9).c(10)...c(q-1)
      // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
      // so x rounded to an integer may or may not fit in a signed 32-bit int
      // the cases that do not fit are identified here; the ones that fit
      // fall through and will be handled with other cases further,
      // under '1 <= q + exp <= 10'
      if (x_sign) {	// if n < 0 and q + exp = 10
	// if n <= -2^31 - 1/2 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1/2
	// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x500000005ull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005 <=>
	  // C >= 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31+1/2 up)
	  tmp64 = 0x500000005ull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      } else {	// if n > 0 and q + exp = 10
	// if n >= 2^31 - 1/2 then n is too large
	// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2
	// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34
	if (q <= 11) {
	  tmp64 = C1.w[0] * ten2k64[11 - q];	// C scaled up to 11-digit int
	  // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
	  if (tmp64 >= 0x4fffffffbull) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	} else {	// if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
	  // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=>
	  // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
	  // (scale 2^31-1/2 up)
	  tmp64 = 0x4fffffffbull;
	  if (q - 11 <= 19) {	// 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
	    __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
	  } else {	// 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
	    __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
	  }
	  if (C1.w[1] > C.w[1]
	      || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
	    // set invalid flag
	    *pfpsf |= INVALID_EXCEPTION;
	    // return Integer Indefinite
	    res = 0x80000000;
	    BID_RETURN (res);
	  }
	  // else cases that can be rounded to a 32-bit int fall through
	  // to '1 <= q + exp <= 10'
	}
      }
    }
    // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2
    // Note: some of the cases tested for above fall through to this point
    if ((q + exp) < 0) {	// n = +/-0.0...c(0)c(1)...c(q-1)
      // return 0
      res = 0x00000000;
      BID_RETURN (res);
    } else if ((q + exp) == 0) {	// n = +/-0.c(0)c(1)...c(q-1)
      // if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
      //   res = 0
      // else
      //   res = +/-1
      ind = q - 1;
      if (ind <= 18) {	// 0 <= ind <= 18
	if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
	  res = 0x00000000;	// return 0
	} else if (x_sign) {	// n < 0
	  res = 0xffffffff;	// return -1
	} else {	// n > 0
	  res = 0x00000001;	// return +1
	}
      } else {	// 19 <= ind <= 33
	if ((C1.w[1] < midpoint128[ind - 19].w[1])
	    || ((C1.w[1] == midpoint128[ind - 19].w[1])
		&& (C1.w[0] < midpoint128[ind - 19].w[0]))) {
	  res = 0x00000000;	// return 0
	} else if (x_sign) {	// n < 0
	  res = 0xffffffff;	// return -1
	} else {	// n > 0
	  res = 0x00000001;	// return +1
	}
      }
    } else {	// if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
      // -2^31-1/2 < x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded
      // to nearest-away to a 32-bit signed integer
      if (exp < 0) {	// 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
	ind = -exp;	// 1 <= ind <= 33; ind is a synonym for 'x'
	// chop off ind digits from the lower part of C1
	// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
	tmp64 = C1.w[0];
	if (ind <= 19) {
	  C1.w[0] = C1.w[0] + midpoint64[ind - 1];
	} else {
	  C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
	  C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
	}
	if (C1.w[0] < tmp64)
	  C1.w[1]++;	// carry from the low 64-bit add
	// calculate C* and f*
	// C* is actually floor(C*) in this case
	// C* and f* need shifting and masking, as shown by
	// shiftright128[] and maskhigh128[]
	// 1 <= x <= 33
	// kx = 10^(-x) = ten2mk128[ind - 1]
	// C* = (C1 + 1/2 * 10^x) * 10^(-x)
	// the approximation of 10^(-x) was rounded up to 118 bits
	__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[1] = P256.w[3];
	  Cstar.w[0] = P256.w[2];
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[1] = 0;
	  Cstar.w[0] = P256.w[3];
	}
	// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
	// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
	// if (0 < f* < 10^(-x)) then the result is a midpoint
	//   if floor(C*) is even then C* = floor(C*) - logical right
	//       shift; C* has p decimal digits, correct by Prop. 1)
	//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
	//       shift; C* has p decimal digits, correct by Pr. 1)
	// else
	//   C* = floor(C*) (logical right shift; C has p decimal digits,
	//       correct by Property 1)
	// n = C* * 10^(e+x)
	// shift right C* by Ex-128 = shiftright128[ind]
	shift = shiftright128[ind - 1];	// 0 <= shift <= 102
	if (ind - 1 <= 21) {	// 0 <= ind - 1 <= 21
	  Cstar.w[0] =
	    (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
	  // redundant, it will be 0!
	  Cstar.w[1] = (Cstar.w[1] >> shift);
	} else {	// 22 <= ind - 1 <= 33
	  Cstar.w[0] = (Cstar.w[0] >> (shift - 64));	// 2 <= shift - 64 <= 38
	}
	// if the result was a midpoint, it was already rounded away from zero
	if (x_sign)
	  res = -Cstar.w[0];
	else
	  res = Cstar.w[0];
	// no need to check for midpoints - already rounded away from zero!
      } else if (exp == 0) {	// 1 <= q <= 10
	// res = +/-C (exact)
	if (x_sign)
	  res = -C1.w[0];
	else
	  res = C1.w[0];
      } else {	// if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
	// res = +/-C * 10^exp (exact)
	if (x_sign)
	  res = -C1.w[0] * ten2k64[exp];
	else
	  res = C1.w[0] * ten2k64[exp];
      }
    }
  }
  BID_RETURN (res);
}

/*****************************************************************************
 *  BID128_to_int32_xrninta
 ****************************************************************************/

// bid128_to_int32_xrninta: round to nearest, ties away from zero, and
// additionally signals inexact (the *_x variant of rninta).
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int,
					  bid128_to_int32_xrninta, x)
  int res;
  UINT64 x_sign;
  UINT64 x_exp;
  int exp;			// unbiased exponent
  // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
  UINT64 tmp64, tmp64A;
  BID_UI64DOUBLE tmp1;
  unsigned int x_nr_bits;
  int q, ind, shift;
  UINT128 C1, C;
  UINT128 Cstar;		// C* represents up to 34 decimal digits ~ 113 bits
  UINT256 fstar;
  UINT256 P256;

  // unpack x
  x_sign = x.w[1] & MASK_SIGN;	// 0 for positive, MASK_SIGN for negative
  x_exp = x.w[1] & MASK_EXP;	// biased and shifted left 49 bit positions
  C1.w[1] = x.w[1] & MASK_COEFF;
  C1.w[0] = x.w[0];

  // check for NaN or Infinity
  if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
    // x is special
    if ((x.w[1] & MASK_NAN) == MASK_NAN) {	// x is NAN
      if ((x.w[1] & MASK_SNAN) == MASK_SNAN) {	// x is SNAN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      } else {	// x is QNaN
	// set invalid flag
	*pfpsf |= INVALID_EXCEPTION;
	// return Integer Indefinite
	res = 0x80000000;
      }
      BID_RETURN (res);
    } else {	// x is not a NaN, so it must be infinity
      if (!x_sign) {	// x is +inf
	// set invalid flag
	*pfpsf |=
INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } else { // x is -inf // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; } BID_RETURN (res); } } // check for non-canonical values (after the check for special values) if ((C1.w[1] > 0x0001ed09bead87c0ull) || (C1.w[1] == 0x0001ed09bead87c0ull && (C1.w[0] > 0x378d8e63ffffffffull)) || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) { res = 0x00000000; BID_RETURN (res); } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) { // x is 0 res = 0x00000000; BID_RETURN (res); } else { // x is not special and is not zero // q = nr. of decimal digits in x // determine first the nr. of bits in x if (C1.w[1] == 0) { if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53 // split the 64-bit value in two 32-bit halves to avoid rounding errors if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion x_nr_bits = 33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } else { // x < 2^32 tmp1.d = (double) (C1.w[0]); // exact conversion x_nr_bits = 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } } else { // if x < 2^53 tmp1.d = (double) C1.w[0]; // exact conversion x_nr_bits = 1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } } else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1]) tmp1.d = (double) C1.w[1]; // exact conversion x_nr_bits = 65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff); } q = nr_digits[x_nr_bits - 1].digits; if (q == 0) { q = nr_digits[x_nr_bits - 1].digits1; if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo)) q++; } exp = (x_exp >> 49) - 6176; if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... 
(cannot fit in 32 bits) // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1) // in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2... // so x rounded to an integer may or may not fit in a signed 32-bit int // the cases that do not fit are identified here; the ones that fit // fall through and will be handled with other cases further, // under '1 <= q + exp <= 10' if (x_sign) { // if n < 0 and q + exp = 10 // if n <= -2^31 - 1/2 then n is too large // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31+1/2 // <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005, 1<=q<=34 if (q <= 11) { tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits) if (tmp64 >= 0x500000005ull) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } else { // if (q > 11), i.e. 
12 <= q <= 34 and so -24 <= exp <= -2 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x500000005 <=> // C >= 0x500000005 * 10^(q-11) where 1 <= q - 11 <= 23 // (scale 2^31+1/2 up) tmp64 = 0x500000005ull; if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]); } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]); } if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } } else { // if n > 0 and q + exp = 10 // if n >= 2^31 - 1/2 then n is too large // too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^31-1/2 // too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb, 1<=q<=34 if (q <= 11) { tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int // c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits) if (tmp64 >= 0x4fffffffbull) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } else { // if (q > 11), i.e. 
12 <= q <= 34 and so -24 <= exp <= -2 // 0.c(0)c(1)...c(q-1) * 10^11 >= 0x4fffffffb <=> // C >= 0x4fffffffb * 10^(q-11) where 1 <= q - 11 <= 23 // (scale 2^31-1/2 up) tmp64 = 0x4fffffffbull; if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits __mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]); } else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits __mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]); } if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) { // set invalid flag *pfpsf |= INVALID_EXCEPTION; // return Integer Indefinite res = 0x80000000; BID_RETURN (res); } // else cases that can be rounded to a 32-bit int fall through // to '1 <= q + exp <= 10' } } } // n is not too large to be converted to int32: -2^31 - 1/2 < n < 2^31 - 1/2 // Note: some of the cases tested for above fall through to this point if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1) // set inexact flag *pfpsf |= INEXACT_EXCEPTION; // return 0 res = 0x00000000; BID_RETURN (res); } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1) // if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1) // res = 0 // else // res = +/-1 ind = q - 1; if (ind <= 18) { // 0 <= ind <= 18 if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) { res = 0x00000000; // return 0 } else if (x_sign) { // n < 0 res = 0xffffffff; // return -1 } else { // n > 0 res = 0x00000001; // return +1 } } else { // 19 <= ind <= 33 if ((C1.w[1] < midpoint128[ind - 19].w[1]) || ((C1.w[1] == midpoint128[ind - 19].w[1]) && (C1.w[0] < midpoint128[ind - 19].w[0]))) { res = 0x00000000; // return 0 } else if (x_sign) { // n < 0 res = 0xffffffff; // return -1 } else { // n > 0 res = 0x00000001; // return +1 } } // set inexact flag *pfpsf |= INEXACT_EXCEPTION; } else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9) // -2^31-1/2 < x <= -1 or 1 <= x < 2^31-1/2 so x can be rounded // to nearest-away to a 32-bit signed integer if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q 
+ exp <= 10 ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x' // chop off ind digits from the lower part of C1 // C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits tmp64 = C1.w[0]; if (ind <= 19) { C1.w[0] = C1.w[0] + midpoint64[ind - 1]; } else { C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0]; C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1]; } if (C1.w[0] < tmp64) C1.w[1]++; // calculate C* and f* // C* is actually floor(C*) in this case // C* and f* need shifting and masking, as shown by // shiftright128[] and maskhigh128[] // 1 <= x <= 33 // kx = 10^(-x) = ten2mk128[ind - 1] // C* = (C1 + 1/2 * 10^x) * 10^(-x) // the approximation of 10^(-x) was rounded up to 118 bits __mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]); if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21 Cstar.w[1] = P256.w[3]; Cstar.w[0] = P256.w[2]; fstar.w[3] = 0; fstar.w[2] = P256.w[2] & maskhigh128[ind - 1]; fstar.w[1] = P256.w[1]; fstar.w[0] = P256.w[0]; } else { // 22 <= ind - 1 <= 33 Cstar.w[1] = 0; Cstar.w[0] = P256.w[3]; fstar.w[3] = P256.w[3] & maskhigh128[ind - 1]; fstar.w[2] = P256.w[2]; fstar.w[1] = P256.w[1]; fstar.w[0] = P256.w[0]; } // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g. // if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999 // if (0 < f* < 10^(-x)) then the result is a midpoint // if floor(C*) is even then C* = floor(C*) - logical right // shift; C* has p decimal digits, correct by Prop. 1) // else if floor(C*) is odd C* = floor(C*)-1 (logical right // shift; C* has p decimal digits, correct by Pr. 1) // else // C* = floor(C*) (logical right shift; C has p decimal digits, // correct by Property 1) // n = C* * 10^(e+x) // shift right C* by Ex-128 = shiftright128[ind] shift = shiftright128[ind - 1]; // 0 <= shift <= 102 if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21 Cstar.w[0] = (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift)); // redundant, it will be 0! 
Cstar.w[1] = (Cstar.w[1] >> shift); } else { // 22 <= ind - 1 <= 33 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38 } // if the result was a midpoint, it was already rounded away from zero if (x_sign) res = -Cstar.w[0]; else res = Cstar.w[0]; // determine inexactness of the rounding of C* // if (0 < f* - 1/2 < 10^(-x)) then // the result is exact // else // if (f* - 1/2 > T*) then // the result is inexact if (ind - 1 <= 2) { if (fstar.w[1] > 0x8000000000000000ull || (fstar.w[1] == 0x8000000000000000ull && fstar.w[0] > 0x0ull)) { // f* > 1/2 and the result may be exact tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2 if ((tmp64 > ten2mk128trunc[ind - 1].w[1] || (tmp64 == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0]))) { // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; } // else the result is exact } else { // the result is inexact; f2* <= 1/2 // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; } } else if (ind - 1 <= 21) { // if 3 <= ind <= 21 if (fstar.w[3] > 0x0 || (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) || (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] && (fstar.w[1] || fstar.w[0]))) { // f2* > 1/2 and the result may be exact // Calculate f2* - 1/2 tmp64 = fstar.w[2] - onehalf128[ind - 1]; tmp64A = fstar.w[3]; if (tmp64 > fstar.w[2]) tmp64A--; if (tmp64A || tmp64 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1] || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) { // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; } // else the result is exact } else { // the result is inexact; f2* <= 1/2 // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; } } else { // if 22 <= ind <= 33 if (fstar.w[3] > onehalf128[ind - 1] || (fstar.w[3] == onehalf128[ind - 1] && (fstar.w[2] || fstar.w[1] || fstar.w[0]))) { // f2* > 1/2 and the result may be exact // Calculate f2* - 1/2 tmp64 = fstar.w[3] - onehalf128[ind - 1]; if (tmp64 || fstar.w[2] || fstar.w[1] > 
ten2mk128trunc[ind - 1].w[1] || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) { // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; } // else the result is exact } else { // the result is inexact; f2* <= 1/2 // set the inexact flag *pfpsf |= INEXACT_EXCEPTION; } } // no need to check for midpoints - already rounded away from zero! } else if (exp == 0) { // 1 <= q <= 10 // res = +/-C (exact) if (x_sign) res = -C1.w[0]; else res = C1.w[0]; } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10 // res = +/-C * 10^exp (exact) if (x_sign) res = -C1.w[0] * ten2k64[exp]; else res = C1.w[0] * ten2k64[exp]; } } } BID_RETURN (res); }
gpl-2.0
htc-mirror/endeavor-ics-qmr-2.6.39-69960c7
drivers/net/b44.c
304
59280
/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. * * Copyright (C) 2002 David S. Miller (davem@redhat.com) * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) * Copyright (C) 2006 Broadcom Corporation. * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de> * * Distribute under GPL. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/ssb/ssb.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> #include "b44.h" #define DRV_MODULE_NAME "b44" #define DRV_MODULE_VERSION "2.0" #define B44_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ #define B44_TX_TIMEOUT (5 * HZ) /* hardware minimum and maximum for a single frame's data payload */ #define B44_MIN_MTU 60 #define B44_MAX_MTU 1500 #define B44_RX_RING_SIZE 512 #define B44_DEF_RX_RING_PENDING 200 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \ B44_RX_RING_SIZE) #define B44_TX_RING_SIZE 512 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1) #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \ B44_TX_RING_SIZE) #define TX_RING_GAP(BP) \ (B44_TX_RING_SIZE - (BP)->tx_pending) #define TX_BUFFS_AVAIL(BP) \ (((BP)->tx_cons <= (BP)->tx_prod) ? 
\ (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \ (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) #define RX_PKT_OFFSET (RX_HEADER_LEN + 2) #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET) /* minimum number of free TX descriptors required to wake up TX process */ #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) /* b44 internal pattern match filter info */ #define B44_PATTERN_BASE 0x400 #define B44_PATTERN_SIZE 0x80 #define B44_PMASK_BASE 0x600 #define B44_PMASK_SIZE 0x10 #define B44_MAX_PATTERNS 16 #define B44_ETHIPV6UDP_HLEN 62 #define B44_ETHIPV4UDP_HLEN 42 static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n"; MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller"); MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ module_param(b44_debug, int, 0); MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); #ifdef CONFIG_B44_PCI static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, { 0 } /* terminate list with empty entry */ }; MODULE_DEVICE_TABLE(pci, b44_pci_tbl); static struct pci_driver b44_pci_driver = { .name = DRV_MODULE_NAME, .id_table = b44_pci_tbl, }; #endif /* CONFIG_B44_PCI */ static const struct ssb_device_id b44_ssb_tbl[] = { SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), SSB_DEVTABLE_END }; MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); static void b44_halt(struct b44 *); static void b44_init_rings(struct b44 *); #define B44_FULL_RESET 1 #define B44_FULL_RESET_SKIP_PHY 2 #define B44_PARTIAL_RESET 3 #define B44_CHIP_RESET_FULL 4 #define B44_CHIP_RESET_PARTIAL 5 static void 
b44_init_hw(struct b44 *, int); static int dma_desc_sync_size; static int instance; static const char b44_gstrings[][ETH_GSTRING_LEN] = { #define _B44(x...) # x, B44_STAT_REG_DECLARE #undef _B44 }; static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, dma_addr_t dma_base, unsigned long offset, enum dma_data_direction dir) { dma_sync_single_for_device(sdev->dma_dev, dma_base + offset, dma_desc_sync_size, dir); } static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, dma_addr_t dma_base, unsigned long offset, enum dma_data_direction dir) { dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset, dma_desc_sync_size, dir); } static inline unsigned long br32(const struct b44 *bp, unsigned long reg) { return ssb_read32(bp->sdev, reg); } static inline void bw32(const struct b44 *bp, unsigned long reg, unsigned long val) { ssb_write32(bp->sdev, reg, val); } static int b44_wait_bit(struct b44 *bp, unsigned long reg, u32 bit, unsigned long timeout, const int clear) { unsigned long i; for (i = 0; i < timeout; i++) { u32 val = br32(bp, reg); if (clear && !(val & bit)) break; if (!clear && (val & bit)) break; udelay(10); } if (i == timeout) { if (net_ratelimit()) netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", bit, reg, clear ? 
"clear" : "set"); return -ENODEV; } return 0; } static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) { u32 val; bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | (index << CAM_CTRL_INDEX_SHIFT))); b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); val = br32(bp, B44_CAM_DATA_LO); data[2] = (val >> 24) & 0xFF; data[3] = (val >> 16) & 0xFF; data[4] = (val >> 8) & 0xFF; data[5] = (val >> 0) & 0xFF; val = br32(bp, B44_CAM_DATA_HI); data[0] = (val >> 8) & 0xFF; data[1] = (val >> 0) & 0xFF; } static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) { u32 val; val = ((u32) data[2]) << 24; val |= ((u32) data[3]) << 16; val |= ((u32) data[4]) << 8; val |= ((u32) data[5]) << 0; bw32(bp, B44_CAM_DATA_LO, val); val = (CAM_DATA_HI_VALID | (((u32) data[0]) << 8) | (((u32) data[1]) << 0)); bw32(bp, B44_CAM_DATA_HI, val); bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT))); b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); } static inline void __b44_disable_ints(struct b44 *bp) { bw32(bp, B44_IMASK, 0); } static void b44_disable_ints(struct b44 *bp) { __b44_disable_ints(bp); /* Flush posted writes. 
*/ br32(bp, B44_IMASK); } static void b44_enable_ints(struct b44 *bp) { bw32(bp, B44_IMASK, bp->imask); } static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) { int err; bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | (phy_addr << MDIO_DATA_PMD_SHIFT) | (reg << MDIO_DATA_RA_SHIFT) | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; return err; } static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) { bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | (phy_addr << MDIO_DATA_PMD_SHIFT) | (reg << MDIO_DATA_RA_SHIFT) | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | (val & MDIO_DATA_DATA))); return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); } static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) { if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) return 0; return __b44_readphy(bp, bp->phy_addr, reg, val); } static inline int b44_writephy(struct b44 *bp, int reg, u32 val) { if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) return 0; return __b44_writephy(bp, bp->phy_addr, reg, val); } /* miilib interface */ static int b44_mii_read(struct net_device *dev, int phy_id, int location) { u32 val; struct b44 *bp = netdev_priv(dev); int rc = __b44_readphy(bp, phy_id, location, &val); if (rc) return 0xffffffff; return val; } static void b44_mii_write(struct net_device *dev, int phy_id, int location, int val) { struct b44 *bp = netdev_priv(dev); __b44_writephy(bp, phy_id, location, val); } static int b44_phy_reset(struct b44 *bp) { u32 val; int err; if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) return 0; err = b44_writephy(bp, MII_BMCR, BMCR_RESET); if (err) return err; udelay(100); err = b44_readphy(bp, MII_BMCR, &val); if (!err) { if (val & BMCR_RESET) { netdev_err(bp->dev, "PHY Reset would not 
complete\n"); err = -ENODEV; } } return err; } static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) { u32 val; bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); bp->flags |= pause_flags; val = br32(bp, B44_RXCONFIG); if (pause_flags & B44_FLAG_RX_PAUSE) val |= RXCONFIG_FLOW; else val &= ~RXCONFIG_FLOW; bw32(bp, B44_RXCONFIG, val); val = br32(bp, B44_MAC_FLOW); if (pause_flags & B44_FLAG_TX_PAUSE) val |= (MAC_FLOW_PAUSE_ENAB | (0xc0 & MAC_FLOW_RX_HI_WATER)); else val &= ~MAC_FLOW_PAUSE_ENAB; bw32(bp, B44_MAC_FLOW, val); } static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) { u32 pause_enab = 0; /* The driver supports only rx pause by default because the b44 mac tx pause mechanism generates excessive pause frames. Use ethtool to turn on b44 tx pause if necessary. */ if ((local & ADVERTISE_PAUSE_CAP) && (local & ADVERTISE_PAUSE_ASYM)){ if ((remote & LPA_PAUSE_ASYM) && !(remote & LPA_PAUSE_CAP)) pause_enab |= B44_FLAG_RX_PAUSE; } __b44_set_flow_ctrl(bp, pause_enab); } #ifdef CONFIG_BCM47XX #include <asm/mach-bcm47xx/nvram.h> static void b44_wap54g10_workaround(struct b44 *bp) { char buf[20]; u32 val; int err; /* * workaround for bad hardware design in Linksys WAP54G v1.0 * see https://dev.openwrt.org/ticket/146 * check and reset bit "isolate" */ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) return; if (simple_strtoul(buf, NULL, 0) == 2) { err = __b44_readphy(bp, 0, MII_BMCR, &val); if (err) goto error; if (!(val & BMCR_ISOLATE)) return; val &= ~BMCR_ISOLATE; err = __b44_writephy(bp, 0, MII_BMCR, val); if (err) goto error; } return; error: pr_warning("PHY: cannot reset MII transceiver isolate bit\n"); } #else static inline void b44_wap54g10_workaround(struct b44 *bp) { } #endif static int b44_setup_phy(struct b44 *bp) { u32 val; int err; b44_wap54g10_workaround(bp); if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) return 0; if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) goto out; if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, 
val & MII_ALEDCTRL_ALLMSK)) != 0) goto out; if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) goto out; if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, val | MII_TLEDCTRL_ENABLE)) != 0) goto out; if (!(bp->flags & B44_FLAG_FORCE_LINK)) { u32 adv = ADVERTISE_CSMA; if (bp->flags & B44_FLAG_ADV_10HALF) adv |= ADVERTISE_10HALF; if (bp->flags & B44_FLAG_ADV_10FULL) adv |= ADVERTISE_10FULL; if (bp->flags & B44_FLAG_ADV_100HALF) adv |= ADVERTISE_100HALF; if (bp->flags & B44_FLAG_ADV_100FULL) adv |= ADVERTISE_100FULL; if (bp->flags & B44_FLAG_PAUSE_AUTO) adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) goto out; if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | BMCR_ANRESTART))) != 0) goto out; } else { u32 bmcr; if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) goto out; bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100); if (bp->flags & B44_FLAG_100_BASE_T) bmcr |= BMCR_SPEED100; if (bp->flags & B44_FLAG_FULL_DUPLEX) bmcr |= BMCR_FULLDPLX; if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) goto out; /* Since we will not be negotiating there is no safe way * to determine if the link partner supports flow control * or not. So just disable it completely in this case. */ b44_set_flow_ctrl(bp, 0, 0); } out: return err; } static void b44_stats_update(struct b44 *bp) { unsigned long reg; u32 *val; val = &bp->hw_stats.tx_good_octets; for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { *val++ += br32(bp, reg); } /* Pad */ reg += 8*4UL; for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { *val++ += br32(bp, reg); } } static void b44_link_report(struct b44 *bp) { if (!netif_carrier_ok(bp->dev)) { netdev_info(bp->dev, "Link is down\n"); } else { netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, (bp->flags & B44_FLAG_FULL_DUPLEX) ? 
"full" : "half"); netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off"); } } static void b44_check_phy(struct b44 *bp) { u32 bmsr, aux; if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { bp->flags |= B44_FLAG_100_BASE_T; bp->flags |= B44_FLAG_FULL_DUPLEX; if (!netif_carrier_ok(bp->dev)) { u32 val = br32(bp, B44_TX_CTRL); val |= TX_CTRL_DUPLEX; bw32(bp, B44_TX_CTRL, val); netif_carrier_on(bp->dev); b44_link_report(bp); } return; } if (!b44_readphy(bp, MII_BMSR, &bmsr) && !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && (bmsr != 0xffff)) { if (aux & MII_AUXCTRL_SPEED) bp->flags |= B44_FLAG_100_BASE_T; else bp->flags &= ~B44_FLAG_100_BASE_T; if (aux & MII_AUXCTRL_DUPLEX) bp->flags |= B44_FLAG_FULL_DUPLEX; else bp->flags &= ~B44_FLAG_FULL_DUPLEX; if (!netif_carrier_ok(bp->dev) && (bmsr & BMSR_LSTATUS)) { u32 val = br32(bp, B44_TX_CTRL); u32 local_adv, remote_adv; if (bp->flags & B44_FLAG_FULL_DUPLEX) val |= TX_CTRL_DUPLEX; else val &= ~TX_CTRL_DUPLEX; bw32(bp, B44_TX_CTRL, val); if (!(bp->flags & B44_FLAG_FORCE_LINK) && !b44_readphy(bp, MII_ADVERTISE, &local_adv) && !b44_readphy(bp, MII_LPA, &remote_adv)) b44_set_flow_ctrl(bp, local_adv, remote_adv); /* Link now up */ netif_carrier_on(bp->dev); b44_link_report(bp); } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { /* Link now down */ netif_carrier_off(bp->dev); b44_link_report(bp); } if (bmsr & BMSR_RFAULT) netdev_warn(bp->dev, "Remote fault detected in PHY\n"); if (bmsr & BMSR_JCD) netdev_warn(bp->dev, "Jabber detected in PHY\n"); } } static void b44_timer(unsigned long __opaque) { struct b44 *bp = (struct b44 *) __opaque; spin_lock_irq(&bp->lock); b44_check_phy(bp); b44_stats_update(bp); spin_unlock_irq(&bp->lock); mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); } static void b44_tx(struct b44 *bp) { u32 cur, cons; cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; cur /= sizeof(struct dma_desc); /* XXX 
needs updating when NETIF_F_SG is supported */ for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { struct ring_info *rp = &bp->tx_buffers[cons]; struct sk_buff *skb = rp->skb; BUG_ON(skb == NULL); dma_unmap_single(bp->sdev->dma_dev, rp->mapping, skb->len, DMA_TO_DEVICE); rp->skb = NULL; dev_kfree_skb_irq(skb); } bp->tx_cons = cons; if (netif_queue_stopped(bp->dev) && TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) netif_wake_queue(bp->dev); bw32(bp, B44_GPTIMER, 0); } /* Works like this. This chip writes a 'struct rx_header" 30 bytes * before the DMA address you give it. So we allocate 30 more bytes * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then * point the chip at 30 bytes past where the rx_header will go. */ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) { struct dma_desc *dp; struct ring_info *src_map, *map; struct rx_header *rh; struct sk_buff *skb; dma_addr_t mapping; int dest_idx; u32 ctrl; src_map = NULL; if (src_idx >= 0) src_map = &bp->rx_buffers[src_idx]; dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); map = &bp->rx_buffers[dest_idx]; skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); if (skb == NULL) return -ENOMEM; mapping = dma_map_single(bp->sdev->dma_dev, skb->data, RX_PKT_BUF_SZ, DMA_FROM_DEVICE); /* Hardware bug work-around, the chip is unable to do PCI DMA to/from anything above 1GB :-( */ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { /* Sigh... 
*/ if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); if (skb == NULL) return -ENOMEM; mapping = dma_map_single(bp->sdev->dma_dev, skb->data, RX_PKT_BUF_SZ, DMA_FROM_DEVICE); if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); dev_kfree_skb_any(skb); return -ENOMEM; } bp->force_copybreak = 1; } rh = (struct rx_header *) skb->data; rh->len = 0; rh->flags = 0; map->skb = skb; map->mapping = mapping; if (src_map != NULL) src_map->skb = NULL; ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ); if (dest_idx == (B44_RX_RING_SIZE - 1)) ctrl |= DESC_CTRL_EOT; dp = &bp->rx_ring[dest_idx]; dp->ctrl = cpu_to_le32(ctrl); dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); if (bp->flags & B44_FLAG_RX_RING_HACK) b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, dest_idx * sizeof(*dp), DMA_BIDIRECTIONAL); return RX_PKT_BUF_SZ; } static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) { struct dma_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; struct rx_header *rh; int dest_idx; __le32 ctrl; dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); dest_desc = &bp->rx_ring[dest_idx]; dest_map = &bp->rx_buffers[dest_idx]; src_desc = &bp->rx_ring[src_idx]; src_map = &bp->rx_buffers[src_idx]; dest_map->skb = src_map->skb; rh = (struct rx_header *) src_map->skb->data; rh->len = 0; rh->flags = 0; dest_map->mapping = src_map->mapping; if (bp->flags & B44_FLAG_RX_RING_HACK) b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, src_idx * sizeof(*src_desc), DMA_BIDIRECTIONAL); ctrl = src_desc->ctrl; if (dest_idx == (B44_RX_RING_SIZE - 1)) ctrl |= cpu_to_le32(DESC_CTRL_EOT); else ctrl &= 
			cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

/* RX completion: walk descriptors from rx_cons up to the hardware's
 * current producer position, passing good frames to the stack.
 * Returns the number of packets delivered, bounded by @budget.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		/* Drop oversized or error-flagged frames; recycle the buffer. */
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			/* The length field can lag the DMA'd data slightly;
			 * poll a few times before giving up on the frame.
			 */
			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC.
		 */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			/* Large frame: hand the mapped skb straight up and
			 * refill the ring slot with a new buffer.
			 */
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			/* Small frame: copy into a fresh skb and recycle the
			 * DMA buffer in place.
			 */
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

/* NAPI poll: service TX completions, RX-overflow recovery, RX work and
 * error conditions recorded in bp->istat by the interrupt handler.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		/* Hard error: full restart of rings and hardware. */
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}

/* Shared-IRQ handler: latch the interrupt status into bp->istat, mask
 * chip interrupts, and defer all real work to NAPI.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

/* Watchdog hook: fully reset and restart the chip after a TX hang. */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

/* Queue one skb for transmission.  Data that the chip's 30-bit DMA
 * limitation cannot reach is copied into a GFP_DMA bounce buffer.
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG!
 Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor must be visible before the doorbell write below. */
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	/* Chip-erratum workarounds: double-write / read-back the pointer. */
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

/* Change the device MTU; requires a chip restart while the interface
 * is up.
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if
	   (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	/* Unmap and free every posted RX buffer. */
	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	/* Streaming-mapped rings (the "ring hack") must be flushed to the
	 * device after the CPU-side memset.
	 */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	/* Pre-post rx_pending RX buffers; stop early on allocation failure. */
	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases buffer bookkeeping arrays and both descriptor rings,
 * undoing either the coherent allocation or the map-single fallback.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  Fall back to kzalloc + dma_map_single
		   and remember it via B44_FLAG_RX_RING_HACK. */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
			rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  Same fallback as the RX ring above. */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
			tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held.
 */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	/* With MIB_CTRL_CLR_ON_READ set, reading each counter zeroes it. */
	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held.
 * Reset the SSB core; for a full reset also program the MDIO clock
 * and probe for an internal vs. external PHY.
 */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		/* Quiesce the MAC and both DMA engines before resetting. */
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held.
 */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held.
 * Program the unicast address into CAM entry 0; skipped entirely in
 * promiscuous mode where the CAM is left disabled.
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

/* ndo_set_mac_address: validate and store a new MAC; rejected with
 * -EBUSY while the interface is running.
 */
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.
	 */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		/* Partial reset (WOL path): only the RX engine is enabled. */
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

/* ndo_open: allocate rings, bring up the hardware, request the IRQ and
 * start the periodic PHY timer.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/* Write a WOL pattern or mask table into the chip's filter RAM,
 * one 32-bit word at a time.
 */
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

/* Build a magic-packet match pattern starting at 'offset': six 0xff
 * sync bytes followed by repeated copies of the MAC address, setting
 * the corresponding bits in pmask.  Returns one less than the pattern
 * length (the encoding the hardware's length register expects).
 */
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		/* Clamp the last repetition to the pattern buffer size. */
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <=0)
			break;
		for (k = 0; k< ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		pr_err("Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0.*/
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these pattern's lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
/* Enable PCI-side wake support (TMSLOW PE bit and PMCSR) for cards
 * sitting on a PCI host bus.
 */
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

/* Arm wake-on-LAN: chips at or above revision B0 use the built-in
 * magic-packet mode; older chips use the pattern-matching filters.
 * Finally the PCI PME machinery is enabled where applicable.
 */
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

/* ndo_stop: quiesce NAPI and the chip, release rings and the IRQ, and
 * arm wake-on-LAN if it was requested via ethtool.
 */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

/* ndo_get_stats: translate the cached hardware MIB counters into the
 * generic netdevice statistics structure.
 */
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats.
*/ nstat->rx_packets = hwstat->rx_pkts; nstat->tx_packets = hwstat->tx_pkts; nstat->rx_bytes = hwstat->rx_octets; nstat->tx_bytes = hwstat->tx_octets; nstat->tx_errors = (hwstat->tx_jabber_pkts + hwstat->tx_oversize_pkts + hwstat->tx_underruns + hwstat->tx_excessive_cols + hwstat->tx_late_cols); nstat->multicast = hwstat->tx_multicast_pkts; nstat->collisions = hwstat->tx_total_cols; nstat->rx_length_errors = (hwstat->rx_oversize_pkts + hwstat->rx_undersize); nstat->rx_over_errors = hwstat->rx_missed_pkts; nstat->rx_frame_errors = hwstat->rx_align_errs; nstat->rx_crc_errors = hwstat->rx_crc_errs; nstat->rx_errors = (hwstat->rx_jabber_pkts + hwstat->rx_oversize_pkts + hwstat->rx_missed_pkts + hwstat->rx_crc_align_errs + hwstat->rx_undersize + hwstat->rx_crc_errs + hwstat->rx_align_errs + hwstat->rx_symbol_errs); nstat->tx_aborted_errors = hwstat->tx_underruns; #if 0 /* Carrier lost counter seems to be broken for some devices */ nstat->tx_carrier_errors = hwstat->tx_carrier_lost; #endif return nstat; } static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) { struct netdev_hw_addr *ha; int i, num_ents; num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == num_ents) break; __b44_cam_write(bp, ha->addr, i++ + 1); } return i+1; } static void __b44_set_rx_mode(struct net_device *dev) { struct b44 *bp = netdev_priv(dev); u32 val; val = br32(bp, B44_RXCONFIG); val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { val |= RXCONFIG_PROMISC; bw32(bp, B44_RXCONFIG, val); } else { unsigned char zero[6] = {0, 0, 0, 0, 0, 0}; int i = 1; __b44_set_mac_addr(bp); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE)) val |= RXCONFIG_ALLMULTI; else i = __b44_load_mcast(bp, dev); for (; i < 64; i++) __b44_cam_write(bp, zero, i); bw32(bp, B44_RXCONFIG, val); val = br32(bp, B44_CAM_CTRL); bw32(bp, B44_CAM_CTRL, val | 
				     CAM_CTRL_ENABLE);
	}
}

/* ndo_set_multicast_list: locked wrapper around __b44_set_rx_mode. */
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

/* ethtool get_msglevel: report the driver's message-enable bitmask. */
static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

/* ethtool set_msglevel: set the driver's message-enable bitmask. */
static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

/* ethtool -i: report driver name, version and bus location. */
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist.
			     */
		break;
	}
}

/* ethtool nway_reset: restart autonegotiation if it is enabled,
 * otherwise return -EINVAL.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* NOTE(review): BMCR is read twice in the original code --
	 * presumably to discard a stale first read; kept as-is.
	 */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

/* ethtool get_settings: report supported/advertised modes and the
 * current speed/duplex/autoneg state from the driver flags.
 */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)){
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

/* ethtool set_settings: apply requested autoneg/speed/duplex.  Only
 * 10/100 is supported by this hardware.
 */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit.
	 */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
			return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			/* Nothing advertised: advertise everything we can. */
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* ethtool get_ringparam: report RX ring limits and current setting. */
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops...
	 */
}

/* ethtool set_ringparam: validate the requested sizes, then restart
 * the chip with the new ring occupancy.
 */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* ethtool get_pauseparam: report flow-control configuration. */
static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

/* ethtool set_pauseparam: update flow-control flags; a full restart is
 * needed when pause autonegotiation is enabled.
 */
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* ethtool get_strings: copy the statistics name table. */
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

/* ethtool get_sset_count: number of statistics strings. */
static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool -S: refresh and copy out the MIB counters. */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val =
		&bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	/* NOTE(review): this walks hw_stats as a flat array of u32 counters
	 * starting at tx_good_octets, assumed to match b44_gstrings order --
	 * verify against the struct layout in b44.h.
	 */
	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

/* ethtool get_wol: only magic-packet wake is supported. */
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/* ethtool set_wol: record the WOL choice; it is armed at close/suspend. */
static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

/* ndo_do_ioctl: MII ioctls forwarded to the generic MII layer, under
 * bp->lock; only valid while the interface is running.
 */
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

/* Read the MAC address and PHY id from the bus SPROM; the second
 * ethernet core on an SSB bus uses the et1 variables.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr =
sdev->bus->sprom.et0mac; bp->phy_addr = sdev->bus->sprom.et0phyaddr; } /* Some ROMs have buggy PHY addresses with the high * bits set (sign extension?). Truncate them to a * valid PHY address. */ bp->phy_addr &= 0x1F; memcpy(bp->dev->dev_addr, addr, 6); if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ pr_err("Invalid MAC address found in EEPROM\n"); return -EINVAL; } memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); bp->imask = IMASK_DEF; /* XXX - really required? bp->flags |= B44_FLAG_BUGGY_TXPTR; */ if (bp->sdev->id.revision >= 7) bp->flags |= B44_FLAG_B0_ANDLATER; return err; } static const struct net_device_ops b44_netdev_ops = { .ndo_open = b44_open, .ndo_stop = b44_close, .ndo_start_xmit = b44_start_xmit, .ndo_get_stats = b44_get_stats, .ndo_set_multicast_list = b44_set_rx_mode, .ndo_set_mac_address = b44_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = b44_ioctl, .ndo_tx_timeout = b44_tx_timeout, .ndo_change_mtu = b44_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = b44_poll_controller, #endif }; static int __devinit b44_init_one(struct ssb_device *sdev, const struct ssb_device_id *ent) { static int b44_version_printed = 0; struct net_device *dev; struct b44 *bp; int err; instance++; if (b44_version_printed++ == 0) pr_info("%s", version); dev = alloc_etherdev(sizeof(*bp)); if (!dev) { dev_err(sdev->dev, "Etherdev alloc failed, aborting\n"); err = -ENOMEM; goto out; } SET_NETDEV_DEV(dev, sdev->dev); /* No interesting netdevice features in this card... 
*/ dev->features |= 0; bp = netdev_priv(dev); bp->sdev = sdev; bp->dev = dev; bp->force_copybreak = 0; bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); spin_lock_init(&bp->lock); bp->rx_pending = B44_DEF_RX_RING_PENDING; bp->tx_pending = B44_DEF_TX_RING_PENDING; dev->netdev_ops = &b44_netdev_ops; netif_napi_add(dev, &bp->napi, b44_poll, 64); dev->watchdog_timeo = B44_TX_TIMEOUT; dev->irq = sdev->irq; SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); err = ssb_bus_powerup(sdev->bus, 0); if (err) { dev_err(sdev->dev, "Failed to powerup the bus\n"); goto err_out_free_dev; } if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) { dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n"); goto err_out_powerdown; } err = b44_get_invariants(bp); if (err) { dev_err(sdev->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_powerdown; } bp->mii_if.dev = dev; bp->mii_if.mdio_read = b44_mii_read; bp->mii_if.mdio_write = b44_mii_write; bp->mii_if.phy_id = bp->phy_addr; bp->mii_if.phy_id_mask = 0x1f; bp->mii_if.reg_num_mask = 0x1f; /* By default, advertise all speed/duplex settings. */ bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL); /* By default, auto-negotiate PAUSE. */ bp->flags |= B44_FLAG_PAUSE_AUTO; err = register_netdev(dev); if (err) { dev_err(sdev->dev, "Cannot register net device, aborting\n"); goto err_out_powerdown; } netif_carrier_off(dev); ssb_set_drvdata(sdev, dev); /* Chip reset provides power to the b44 MAC & PCI cores, which * is necessary for MAC register access. 
*/ b44_chip_reset(bp, B44_CHIP_RESET_FULL); /* do a phy reset to test if there is an active phy */ if (b44_phy_reset(bp) < 0) bp->phy_addr = B44_PHY_ADDR_NO_PHY; netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n", dev->dev_addr); return 0; err_out_powerdown: ssb_bus_may_powerdown(sdev->bus); err_out_free_dev: free_netdev(dev); out: return err; } static void __devexit b44_remove_one(struct ssb_device *sdev) { struct net_device *dev = ssb_get_drvdata(sdev); unregister_netdev(dev); ssb_device_disable(sdev, 0); ssb_bus_may_powerdown(sdev->bus); free_netdev(dev); ssb_pcihost_set_power_state(sdev, PCI_D3hot); ssb_set_drvdata(sdev, NULL); } static int b44_suspend(struct ssb_device *sdev, pm_message_t state) { struct net_device *dev = ssb_get_drvdata(sdev); struct b44 *bp = netdev_priv(dev); if (!netif_running(dev)) return 0; del_timer_sync(&bp->timer); spin_lock_irq(&bp->lock); b44_halt(bp); netif_carrier_off(bp->dev); netif_device_detach(bp->dev); b44_free_rings(bp); spin_unlock_irq(&bp->lock); free_irq(dev->irq, dev); if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_init_hw(bp, B44_PARTIAL_RESET); b44_setup_wol(bp); } ssb_pcihost_set_power_state(sdev, PCI_D3hot); return 0; } static int b44_resume(struct ssb_device *sdev) { struct net_device *dev = ssb_get_drvdata(sdev); struct b44 *bp = netdev_priv(dev); int rc = 0; rc = ssb_bus_powerup(sdev->bus, 0); if (rc) { dev_err(sdev->dev, "Failed to powerup the bus\n"); return rc; } if (!netif_running(dev)) return 0; spin_lock_irq(&bp->lock); b44_init_rings(bp); b44_init_hw(bp, B44_FULL_RESET); spin_unlock_irq(&bp->lock); /* * As a shared interrupt, the handler can be called immediately. To be * able to check the interrupt status the hardware must already be * powered back on (b44_init_hw). 
*/ rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); if (rc) { netdev_err(dev, "request_irq failed\n"); spin_lock_irq(&bp->lock); b44_halt(bp); b44_free_rings(bp); spin_unlock_irq(&bp->lock); return rc; } netif_device_attach(bp->dev); b44_enable_ints(bp); netif_wake_queue(dev); mod_timer(&bp->timer, jiffies + 1); return 0; } static struct ssb_driver b44_ssb_driver = { .name = DRV_MODULE_NAME, .id_table = b44_ssb_tbl, .probe = b44_init_one, .remove = __devexit_p(b44_remove_one), .suspend = b44_suspend, .resume = b44_resume, }; static inline int b44_pci_init(void) { int err = 0; #ifdef CONFIG_B44_PCI err = ssb_pcihost_register(&b44_pci_driver); #endif return err; } static inline void b44_pci_exit(void) { #ifdef CONFIG_B44_PCI ssb_pcihost_unregister(&b44_pci_driver); #endif } static int __init b44_init(void) { unsigned int dma_desc_align_size = dma_get_cache_alignment(); int err; /* Setup paramaters for syncing RX/TX DMA descriptors */ dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); err = b44_pci_init(); if (err) return err; err = ssb_driver_register(&b44_ssb_driver); if (err) b44_pci_exit(); return err; } static void __exit b44_cleanup(void) { ssb_driver_unregister(&b44_ssb_driver); b44_pci_exit(); } module_init(b44_init); module_exit(b44_cleanup);
gpl-2.0
gchild320/flounder
fs/ext4/crypto_policy.c
304
6446
/* * linux/fs/ext4/crypto_policy.c * * Copyright (C) 2015, Google, Inc. * * This contains encryption policy functions for ext4 * * Written by Michael Halcrow, 2015. */ #include <linux/random.h> #include <linux/string.h> #include <linux/types.h> #include "ext4.h" #include "xattr.h" static int ext4_inode_has_encryption_context(struct inode *inode) { int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0); return (res > 0); } /* * check whether the policy is consistent with the encryption context * for the inode */ static int ext4_is_encryption_context_consistent_with_policy( struct inode *inode, const struct ext4_encryption_policy *policy) { struct ext4_encryption_context ctx; int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx)); if (res != sizeof(ctx)) return 0; return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, EXT4_KEY_DESCRIPTOR_SIZE) == 0 && (ctx.flags == policy->flags) && (ctx.contents_encryption_mode == policy->contents_encryption_mode) && (ctx.filenames_encryption_mode == policy->filenames_encryption_mode)); } static int ext4_create_encryption_context_from_policy( struct inode *inode, const struct ext4_encryption_policy *policy) { struct ext4_encryption_context ctx; int res = 0; res = ext4_convert_inline_data(inode); if (res) return res; ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, EXT4_KEY_DESCRIPTOR_SIZE); if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) { printk(KERN_WARNING "%s: Invalid contents encryption mode %d\n", __func__, policy->contents_encryption_mode); return -EINVAL; } if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { printk(KERN_WARNING "%s: Invalid filenames encryption mode %d\n", __func__, policy->filenames_encryption_mode); return -EINVAL; } if (policy->flags & ~EXT4_POLICY_FLAGS_VALID) return 
-EINVAL; ctx.contents_encryption_mode = policy->contents_encryption_mode; ctx.filenames_encryption_mode = policy->filenames_encryption_mode; ctx.flags = policy->flags; BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx), 0); if (!res) ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); return res; } int ext4_process_policy(const struct ext4_encryption_policy *policy, struct inode *inode) { if (policy->version != 0) return -EINVAL; if (!ext4_inode_has_encryption_context(inode)) { if (!S_ISDIR(inode->i_mode)) return -EINVAL; if (!ext4_empty_dir(inode)) return -ENOTEMPTY; return ext4_create_encryption_context_from_policy(inode, policy); } if (ext4_is_encryption_context_consistent_with_policy(inode, policy)) return 0; printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n", __func__); return -EINVAL; } int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) { struct ext4_encryption_context ctx; int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx)); if (res != sizeof(ctx)) return -ENOENT; if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1) return -EINVAL; policy->version = 0; policy->contents_encryption_mode = ctx.contents_encryption_mode; policy->filenames_encryption_mode = ctx.filenames_encryption_mode; policy->flags = ctx.flags; memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, EXT4_KEY_DESCRIPTOR_SIZE); return 0; } int ext4_is_child_context_consistent_with_parent(struct inode *parent, struct inode *child) { struct ext4_crypt_info *parent_ci, *child_ci; int res; if ((parent == NULL) || (child == NULL)) { pr_err("parent %p child %p\n", parent, child); BUG_ON(1); } /* no restrictions if the parent directory is not encrypted */ if (!ext4_encrypted_inode(parent)) return 1; 
/* if the child directory is not encrypted, this is always a problem */ if (!ext4_encrypted_inode(child)) return 0; res = ext4_get_encryption_info(parent); if (res) return 0; res = ext4_get_encryption_info(child); if (res) return 0; parent_ci = EXT4_I(parent)->i_crypt_info; child_ci = EXT4_I(child)->i_crypt_info; if (!parent_ci && !child_ci) return 1; if (!parent_ci || !child_ci) return 0; return (memcmp(parent_ci->ci_master_key, child_ci->ci_master_key, EXT4_KEY_DESCRIPTOR_SIZE) == 0 && (parent_ci->ci_data_mode == child_ci->ci_data_mode) && (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && (parent_ci->ci_flags == child_ci->ci_flags)); } /** * ext4_inherit_context() - Sets a child context from its parent * @parent: Parent inode from which the context is inherited. * @child: Child inode that inherits the context from @parent. * * Return: Zero on success, non-zero otherwise */ int ext4_inherit_context(struct inode *parent, struct inode *child) { struct ext4_encryption_context ctx; struct ext4_crypt_info *ci; int res; res = ext4_get_encryption_info(parent); if (res < 0) return res; ci = EXT4_I(parent)->i_crypt_info; if (ci == NULL) return -ENOKEY; ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) { ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS; ctx.flags = 0; memset(ctx.master_key_descriptor, 0x42, EXT4_KEY_DESCRIPTOR_SIZE); res = 0; } else { ctx.contents_encryption_mode = ci->ci_data_mode; ctx.filenames_encryption_mode = ci->ci_filename_mode; ctx.flags = ci->ci_flags; memcpy(ctx.master_key_descriptor, ci->ci_master_key, EXT4_KEY_DESCRIPTOR_SIZE); } get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx), 0); if (!res) { ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT); ext4_clear_inode_state(child, 
EXT4_STATE_MAY_INLINE_DATA); res = ext4_get_encryption_info(child); } return res; }
gpl-2.0
Ken-Liu/OpenScrKernel_For_XC210
drivers/staging/usbip/stub_rx.c
560
15480
/* * Copyright (C) 2003-2008 Takahiro Hirofuchi * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <linux/slab.h> #include "usbip_common.h" #include "stub.h" #include <linux/usb/hcd.h> static int is_clear_halt_cmd(struct urb *urb) { struct usb_ctrlrequest *req; req = (struct usb_ctrlrequest *) urb->setup_packet; return (req->bRequest == USB_REQ_CLEAR_FEATURE) && (req->bRequestType == USB_RECIP_ENDPOINT) && (req->wValue == USB_ENDPOINT_HALT); } static int is_set_interface_cmd(struct urb *urb) { struct usb_ctrlrequest *req; req = (struct usb_ctrlrequest *) urb->setup_packet; return (req->bRequest == USB_REQ_SET_INTERFACE) && (req->bRequestType == USB_RECIP_INTERFACE); } static int is_set_configuration_cmd(struct urb *urb) { struct usb_ctrlrequest *req; req = (struct usb_ctrlrequest *) urb->setup_packet; return (req->bRequest == USB_REQ_SET_CONFIGURATION) && (req->bRequestType == USB_RECIP_DEVICE); } static int is_reset_device_cmd(struct urb *urb) { struct usb_ctrlrequest *req; __u16 value; __u16 index; req = (struct usb_ctrlrequest *) urb->setup_packet; value = le16_to_cpu(req->wValue); index = le16_to_cpu(req->wIndex); if ((req->bRequest == USB_REQ_SET_FEATURE) && (req->bRequestType == USB_RT_PORT) && (value == USB_PORT_FEAT_RESET)) { usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index); return 1; } else return 0; } static int 
tweak_clear_halt_cmd(struct urb *urb) { struct usb_ctrlrequest *req; int target_endp; int target_dir; int target_pipe; int ret; req = (struct usb_ctrlrequest *) urb->setup_packet; /* * The stalled endpoint is specified in the wIndex value. The endpoint * of the urb is the target of this clear_halt request (i.e., control * endpoint). */ target_endp = le16_to_cpu(req->wIndex) & 0x000f; /* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */ target_dir = le16_to_cpu(req->wIndex) & 0x0080; if (target_dir) target_pipe = usb_rcvctrlpipe(urb->dev, target_endp); else target_pipe = usb_sndctrlpipe(urb->dev, target_endp); ret = usb_clear_halt(urb->dev, target_pipe); if (ret < 0) usbip_uinfo("clear_halt error: devnum %d endp %d, %d\n", urb->dev->devnum, target_endp, ret); else usbip_uinfo("clear_halt done: devnum %d endp %d\n", urb->dev->devnum, target_endp); return ret; } static int tweak_set_interface_cmd(struct urb *urb) { struct usb_ctrlrequest *req; __u16 alternate; __u16 interface; int ret; req = (struct usb_ctrlrequest *) urb->setup_packet; alternate = le16_to_cpu(req->wValue); interface = le16_to_cpu(req->wIndex); usbip_dbg_stub_rx("set_interface: inf %u alt %u\n", interface, alternate); ret = usb_set_interface(urb->dev, interface, alternate); if (ret < 0) usbip_uinfo("set_interface error: inf %u alt %u, %d\n", interface, alternate, ret); else usbip_uinfo("set_interface done: inf %u alt %u\n", interface, alternate); return ret; } static int tweak_set_configuration_cmd(struct urb *urb) { struct usb_ctrlrequest *req; __u16 config; req = (struct usb_ctrlrequest *) urb->setup_packet; config = le16_to_cpu(req->wValue); /* * I have never seen a multi-config device. Very rare. * For most devices, this will be called to choose a default * configuration only once in an initialization phase. * * set_configuration may change a device configuration and its device * drivers will be unbound and assigned for a new device configuration. 
* This means this usbip driver will be also unbound when called, then * eventually reassigned to the device as far as driver matching * condition is kept. * * Unfortunatelly, an existing usbip connection will be dropped * due to this driver unbinding. So, skip here. * A user may need to set a special configuration value before * exporting the device. */ usbip_uinfo("set_configuration (%d) to %s\n", config, dev_name(&urb->dev->dev)); usbip_uinfo("but, skip!\n"); return 0; /* return usb_driver_set_configuration(urb->dev, config); */ } static int tweak_reset_device_cmd(struct urb *urb) { struct usb_ctrlrequest *req; __u16 value; __u16 index; int ret; req = (struct usb_ctrlrequest *) urb->setup_packet; value = le16_to_cpu(req->wValue); index = le16_to_cpu(req->wIndex); usbip_uinfo("reset_device (port %d) to %s\n", index, dev_name(&urb->dev->dev)); /* all interfaces should be owned by usbip driver, so just reset it. */ ret = usb_lock_device_for_reset(urb->dev, NULL); if (ret < 0) { dev_err(&urb->dev->dev, "lock for reset\n"); return ret; } /* try to reset the device */ ret = usb_reset_device(urb->dev); if (ret < 0) dev_err(&urb->dev->dev, "device reset\n"); usb_unlock_device(urb->dev); return ret; } /* * clear_halt, set_interface, and set_configuration require special tricks. */ static void tweak_special_requests(struct urb *urb) { if (!urb || !urb->setup_packet) return; if (usb_pipetype(urb->pipe) != PIPE_CONTROL) return; if (is_clear_halt_cmd(urb)) /* tweak clear_halt */ tweak_clear_halt_cmd(urb); else if (is_set_interface_cmd(urb)) /* tweak set_interface */ tweak_set_interface_cmd(urb); else if (is_set_configuration_cmd(urb)) /* tweak set_configuration */ tweak_set_configuration_cmd(urb); else if (is_reset_device_cmd(urb)) tweak_reset_device_cmd(urb); else usbip_dbg_stub_rx("no need to tweak\n"); } /* * stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb(). * By unlinking the urb asynchronously, stub_rx can continuously * process coming urbs. 
Even if the urb is unlinked, its completion * handler will be called and stub_tx will send a return pdu. * * See also comments about unlinking strategy in vhci_hcd.c. */ static int stub_recv_cmd_unlink(struct stub_device *sdev, struct usbip_header *pdu) { unsigned long flags; struct stub_priv *priv; spin_lock_irqsave(&sdev->priv_lock, flags); list_for_each_entry(priv, &sdev->priv_init, list) { if (priv->seqnum == pdu->u.cmd_unlink.seqnum) { int ret; dev_info(&priv->urb->dev->dev, "unlink urb %p\n", priv->urb); /* * This matched urb is not completed yet (i.e., be in * flight in usb hcd hardware/driver). Now we are * cancelling it. The unlinking flag means that we are * now not going to return the normal result pdu of a * submission request, but going to return a result pdu * of the unlink request. */ priv->unlinking = 1; /* * In the case that unlinking flag is on, prev->seqnum * is changed from the seqnum of the cancelling urb to * the seqnum of the unlink request. This will be used * to make the result pdu of the unlink request. */ priv->seqnum = pdu->base.seqnum; spin_unlock_irqrestore(&sdev->priv_lock, flags); /* * usb_unlink_urb() is now out of spinlocking to avoid * spinlock recursion since stub_complete() is * sometimes called in this context but not in the * interrupt context. If stub_complete() is executed * before we call usb_unlink_urb(), usb_unlink_urb() * will return an error value. In this case, stub_tx * will return the result pdu of this unlink request * though submission is completed and actual unlinking * is not executed. OK? */ /* In the above case, urb->status is not -ECONNRESET, * so a driver in a client host will know the failure * of the unlink request ? 
*/ ret = usb_unlink_urb(priv->urb); if (ret != -EINPROGRESS) dev_err(&priv->urb->dev->dev, "failed to unlink a urb %p, ret %d\n", priv->urb, ret); return 0; } } usbip_dbg_stub_rx("seqnum %d is not pending\n", pdu->u.cmd_unlink.seqnum); /* * The urb of the unlink target is not found in priv_init queue. It was * already completed and its results is/was going to be sent by a * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only * return the completeness of this unlink request to vhci_hcd. */ stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0); spin_unlock_irqrestore(&sdev->priv_lock, flags); return 0; } static int valid_request(struct stub_device *sdev, struct usbip_header *pdu) { struct usbip_device *ud = &sdev->ud; if (pdu->base.devid == sdev->devid) { spin_lock(&ud->lock); if (ud->status == SDEV_ST_USED) { /* A request is valid. */ spin_unlock(&ud->lock); return 1; } spin_unlock(&ud->lock); } return 0; } static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, struct usbip_header *pdu) { struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; unsigned long flags; spin_lock_irqsave(&sdev->priv_lock, flags); priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC); if (!priv) { dev_err(&sdev->interface->dev, "alloc stub_priv\n"); spin_unlock_irqrestore(&sdev->priv_lock, flags); usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); return NULL; } priv->seqnum = pdu->base.seqnum; priv->sdev = sdev; /* * After a stub_priv is linked to a list_head, * our error handler can free allocated data. 
*/ list_add_tail(&priv->list, &sdev->priv_init); spin_unlock_irqrestore(&sdev->priv_lock, flags); return priv; } static struct usb_host_endpoint *get_ep_from_epnum(struct usb_device *udev, int epnum0) { struct usb_host_config *config; int i = 0, j = 0; struct usb_host_endpoint *ep = NULL; int epnum; int found = 0; if (epnum0 == 0) return &udev->ep0; config = udev->actconfig; if (!config) return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_host_interface *setting; setting = config->interface[i]->cur_altsetting; for (j = 0; j < setting->desc.bNumEndpoints; j++) { ep = &setting->endpoint[j]; epnum = (ep->desc.bEndpointAddress & 0x7f); if (epnum == epnum0) { /* usbip_uinfo("found epnum %d\n", epnum0);*/ found = 1; break; } } } if (found) return ep; else return NULL; } static int get_pipe(struct stub_device *sdev, int epnum, int dir) { struct usb_device *udev = interface_to_usbdev(sdev->interface); struct usb_host_endpoint *ep; struct usb_endpoint_descriptor *epd = NULL; ep = get_ep_from_epnum(udev, epnum); if (!ep) { dev_err(&sdev->interface->dev, "no such endpoint?, %d\n", epnum); BUG(); } epd = &ep->desc; #if 0 /* epnum 0 is always control */ if (epnum == 0) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, 0); else return usb_rcvctrlpipe(udev, 0); } #endif if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); else return usb_rcvctrlpipe(udev, epnum); } if (usb_endpoint_xfer_bulk(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndbulkpipe(udev, epnum); else return usb_rcvbulkpipe(udev, epnum); } if (usb_endpoint_xfer_int(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndintpipe(udev, epnum); else return usb_rcvintpipe(udev, epnum); } if (usb_endpoint_xfer_isoc(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndisocpipe(udev, epnum); else return usb_rcvisocpipe(udev, epnum); } /* NOT REACHED */ dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum); return 0; } static void 
stub_recv_cmd_submit(struct stub_device *sdev, struct usbip_header *pdu) { int ret; struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; struct usb_device *udev = interface_to_usbdev(sdev->interface); int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); priv = stub_priv_alloc(sdev, pdu); if (!priv) return; /* setup a urb */ if (usb_pipeisoc(pipe)) priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets, GFP_KERNEL); else priv->urb = usb_alloc_urb(0, GFP_KERNEL); if (!priv->urb) { dev_err(&sdev->interface->dev, "malloc urb\n"); usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); return; } /* set priv->urb->transfer_buffer */ if (pdu->u.cmd_submit.transfer_buffer_length > 0) { priv->urb->transfer_buffer = kzalloc(pdu->u.cmd_submit.transfer_buffer_length, GFP_KERNEL); if (!priv->urb->transfer_buffer) { dev_err(&sdev->interface->dev, "malloc x_buff\n"); usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); return; } } /* set priv->urb->setup_packet */ priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8, GFP_KERNEL); if (!priv->urb->setup_packet) { dev_err(&sdev->interface->dev, "allocate setup_packet\n"); usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); return; } /* set other members from the base header of pdu */ priv->urb->context = (void *) priv; priv->urb->dev = udev; priv->urb->pipe = pipe; priv->urb->complete = stub_complete; usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0); if (usbip_recv_xbuff(ud, priv->urb) < 0) return; if (usbip_recv_iso(ud, priv->urb) < 0) return; /* no need to submit an intercepted request, but harmless? */ tweak_special_requests(priv->urb); /* urb is now ready to submit */ ret = usb_submit_urb(priv->urb, GFP_KERNEL); if (ret == 0) usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", pdu->base.seqnum); else { dev_err(&sdev->interface->dev, "submit_urb error, %d\n", ret); usbip_dump_header(pdu); usbip_dump_urb(priv->urb); /* * Pessimistic. * This connection will be discarded. 
*/ usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); } usbip_dbg_stub_rx("Leave\n"); return; } /* recv a pdu */ static void stub_rx_pdu(struct usbip_device *ud) { int ret; struct usbip_header pdu; struct stub_device *sdev = container_of(ud, struct stub_device, ud); struct device *dev = &sdev->interface->dev; usbip_dbg_stub_rx("Enter\n"); memset(&pdu, 0, sizeof(pdu)); /* 1. receive a pdu header */ ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0); if (ret != sizeof(pdu)) { dev_err(dev, "recv a header, %d\n", ret); usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); return; } usbip_header_correct_endian(&pdu, 0); if (usbip_dbg_flag_stub_rx) usbip_dump_header(&pdu); if (!valid_request(sdev, &pdu)) { dev_err(dev, "recv invalid request\n"); usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); return; } switch (pdu.base.command) { case USBIP_CMD_UNLINK: stub_recv_cmd_unlink(sdev, &pdu); break; case USBIP_CMD_SUBMIT: stub_recv_cmd_submit(sdev, &pdu); break; default: /* NOTREACHED */ dev_err(dev, "unknown pdu\n"); usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); return; } } void stub_rx_loop(struct usbip_task *ut) { struct usbip_device *ud = container_of(ut, struct usbip_device, tcp_rx); while (1) { if (signal_pending(current)) { usbip_dbg_stub_rx("signal caught!\n"); break; } if (usbip_event_happened(ud)) break; stub_rx_pdu(ud); } }
gpl-2.0
Krylon360/SGS4G_Kernel_GB
drivers/gpu/drm/drm_buffer.c
816
4956
/************************************************************************** * * Copyright 2010 Pauli Nieminen. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * **************************************************************************/ /* * Multipart buffer for coping data which is larger than the page size. * * Authors: * Pauli Nieminen <suokkos-at-gmail-dot-com> */ #include "drm_buffer.h" /** * Allocate the drm buffer object. * * buf: Pointer to a pointer where the object is stored. * size: The number of bytes to allocate. 
*/ int drm_buffer_alloc(struct drm_buffer **buf, int size) { int nr_pages = size / PAGE_SIZE + 1; int idx; /* Allocating pointer table to end of structure makes drm_buffer * variable sized */ *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *), GFP_KERNEL); if (*buf == NULL) { DRM_ERROR("Failed to allocate drm buffer object to hold" " %d bytes in %d pages.\n", size, nr_pages); return -ENOMEM; } (*buf)->size = size; for (idx = 0; idx < nr_pages; ++idx) { (*buf)->data[idx] = kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE), GFP_KERNEL); if ((*buf)->data[idx] == NULL) { DRM_ERROR("Failed to allocate %dth page for drm" " buffer with %d bytes and %d pages.\n", idx + 1, size, nr_pages); goto error_out; } } return 0; error_out: /* Only last element can be null pointer so check for it first. */ if ((*buf)->data[idx]) kfree((*buf)->data[idx]); for (--idx; idx >= 0; --idx) kfree((*buf)->data[idx]); kfree(*buf); return -ENOMEM; } EXPORT_SYMBOL(drm_buffer_alloc); /** * Copy the user data to the begin of the buffer and reset the processing * iterator. * * user_data: A pointer the data that is copied to the buffer. * size: The Number of bytes to copy. 
*/ extern int drm_buffer_copy_from_user(struct drm_buffer *buf, void __user *user_data, int size) { int nr_pages = size / PAGE_SIZE + 1; int idx; if (size > buf->size) { DRM_ERROR("Requesting to copy %d bytes to a drm buffer with" " %d bytes space\n", size, buf->size); return -EFAULT; } for (idx = 0; idx < nr_pages; ++idx) { if (DRM_COPY_FROM_USER(buf->data[idx], user_data + idx * PAGE_SIZE, min(PAGE_SIZE, size - idx * PAGE_SIZE))) { DRM_ERROR("Failed to copy user data (%p) to drm buffer" " (%p) %dth page.\n", user_data, buf, idx); return -EFAULT; } } buf->iterator = 0; return 0; } EXPORT_SYMBOL(drm_buffer_copy_from_user); /** * Free the drm buffer object */ void drm_buffer_free(struct drm_buffer *buf) { if (buf != NULL) { int nr_pages = buf->size / PAGE_SIZE + 1; int idx; for (idx = 0; idx < nr_pages; ++idx) kfree(buf->data[idx]); kfree(buf); } } EXPORT_SYMBOL(drm_buffer_free); /** * Read an object from buffer that may be split to multiple parts. If object * is not split function just returns the pointer to object in buffer. But in * case of split object data is copied to given stack object that is suplied * by caller. * * The processing location of the buffer is also advanced to the next byte * after the object. * * objsize: The size of the objet in bytes. * stack_obj: A pointer to a memory location where object can be copied. */ void *drm_buffer_read_object(struct drm_buffer *buf, int objsize, void *stack_obj) { int idx = drm_buffer_index(buf); int page = drm_buffer_page(buf); void *obj = 0; if (idx + objsize <= PAGE_SIZE) { obj = &buf->data[page][idx]; } else { /* The object is split which forces copy to temporary object.*/ int beginsz = PAGE_SIZE - idx; memcpy(stack_obj, &buf->data[page][idx], beginsz); memcpy(stack_obj + beginsz, &buf->data[page + 1][0], objsize - beginsz); obj = stack_obj; } drm_buffer_advance(buf, objsize); return obj; } EXPORT_SYMBOL(drm_buffer_read_object);
gpl-2.0
glewarne/testing
init/initramfs.c
2096
13319
/* * Many of the syscalls used in this file expect some of the arguments * to be __user pointers not __kernel pointers. To limit the sparse * noise, turn off sparse checking for this file. */ #ifdef __CHECKER__ #undef __CHECKER__ #warning "Sparse checking disabled for this file" #endif #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/dirent.h> #include <linux/syscalls.h> #include <linux/utime.h> static __initdata char *message; static void __init error(char *x) { if (!message) message = x; } /* link hash */ #define N_ALIGN(len) ((((len) + 1) & ~3) + 2) static __initdata struct hash { int ino, minor, major; umode_t mode; struct hash *next; char name[N_ALIGN(PATH_MAX)]; } *head[32]; static inline int hash(int major, int minor, int ino) { unsigned long tmp = ino + minor + (major << 3); tmp += tmp >> 5; return tmp & 31; } static char __init *find_link(int major, int minor, int ino, umode_t mode, char *name) { struct hash **p, *q; for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) { if ((*p)->ino != ino) continue; if ((*p)->minor != minor) continue; if ((*p)->major != major) continue; if (((*p)->mode ^ mode) & S_IFMT) continue; return (*p)->name; } q = kmalloc(sizeof(struct hash), GFP_KERNEL); if (!q) panic("can't allocate link hash entry"); q->major = major; q->minor = minor; q->ino = ino; q->mode = mode; strcpy(q->name, name); q->next = NULL; *p = q; return NULL; } static void __init free_hash(void) { struct hash **p, *q; for (p = head; p < head + 32; p++) { while (*p) { q = *p; *p = q->next; kfree(q); } } } static long __init do_utime(char *filename, time_t mtime) { struct timespec t[2]; t[0].tv_sec = mtime; t[0].tv_nsec = 0; t[1].tv_sec = mtime; t[1].tv_nsec = 0; return do_utimes(AT_FDCWD, filename, t, AT_SYMLINK_NOFOLLOW); } static __initdata LIST_HEAD(dir_list); struct dir_entry { struct list_head list; char *name; 
time_t mtime; }; static void __init dir_add(const char *name, time_t mtime) { struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL); if (!de) panic("can't allocate dir_entry buffer"); INIT_LIST_HEAD(&de->list); de->name = kstrdup(name, GFP_KERNEL); de->mtime = mtime; list_add(&de->list, &dir_list); } static void __init dir_utime(void) { struct dir_entry *de, *tmp; list_for_each_entry_safe(de, tmp, &dir_list, list) { list_del(&de->list); do_utime(de->name, de->mtime); kfree(de->name); kfree(de); } } static __initdata time_t mtime; /* cpio header parsing */ static __initdata unsigned long ino, major, minor, nlink; static __initdata umode_t mode; static __initdata unsigned long body_len, name_len; static __initdata uid_t uid; static __initdata gid_t gid; static __initdata unsigned rdev; static void __init parse_header(char *s) { unsigned long parsed[12]; char buf[9]; int i; buf[8] = '\0'; for (i = 0, s += 6; i < 12; i++, s += 8) { memcpy(buf, s, 8); parsed[i] = simple_strtoul(buf, NULL, 16); } ino = parsed[0]; mode = parsed[1]; uid = parsed[2]; gid = parsed[3]; nlink = parsed[4]; mtime = parsed[5]; body_len = parsed[6]; major = parsed[7]; minor = parsed[8]; rdev = new_encode_dev(MKDEV(parsed[9], parsed[10])); name_len = parsed[11]; } /* FSM */ static __initdata enum state { Start, Collect, GotHeader, SkipIt, GotName, CopyFile, GotSymlink, Reset } state, next_state; static __initdata char *victim; static __initdata unsigned count; static __initdata loff_t this_header, next_header; static inline void __init eat(unsigned n) { victim += n; this_header += n; count -= n; } static __initdata char *vcollected; static __initdata char *collected; static __initdata int remains; static __initdata char *collect; static void __init read_into(char *buf, unsigned size, enum state next) { if (count >= size) { collected = victim; eat(size); state = next; } else { collect = collected = buf; remains = size; next_state = next; state = Collect; } } static __initdata char 
*header_buf, *symlink_buf, *name_buf; static int __init do_start(void) { read_into(header_buf, 110, GotHeader); return 0; } static int __init do_collect(void) { unsigned n = remains; if (count < n) n = count; memcpy(collect, victim, n); eat(n); collect += n; if ((remains -= n) != 0) return 1; state = next_state; return 0; } static int __init do_header(void) { if (memcmp(collected, "070707", 6)==0) { error("incorrect cpio method used: use -H newc option"); return 1; } if (memcmp(collected, "070701", 6)) { error("no cpio magic"); return 1; } parse_header(collected); next_header = this_header + N_ALIGN(name_len) + body_len; next_header = (next_header + 3) & ~3; state = SkipIt; if (name_len <= 0 || name_len > PATH_MAX) return 0; if (S_ISLNK(mode)) { if (body_len > PATH_MAX) return 0; collect = collected = symlink_buf; remains = N_ALIGN(name_len) + body_len; next_state = GotSymlink; state = Collect; return 0; } if (S_ISREG(mode) || !body_len) read_into(name_buf, N_ALIGN(name_len), GotName); return 0; } static int __init do_skip(void) { if (this_header + count < next_header) { eat(count); return 1; } else { eat(next_header - this_header); state = next_state; return 0; } } static int __init do_reset(void) { while(count && *victim == '\0') eat(1); if (count && (this_header & 3)) error("broken padding"); return 1; } static int __init maybe_link(void) { if (nlink >= 2) { char *old = find_link(major, minor, ino, mode, collected); if (old) return (sys_link(old, collected) < 0) ? 
-1 : 1; } return 0; } static void __init clean_path(char *path, umode_t mode) { struct stat st; if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { if (S_ISDIR(st.st_mode)) sys_rmdir(path); else sys_unlink(path); } } static __initdata int wfd; static int __init do_name(void) { state = SkipIt; next_state = Reset; if (strcmp(collected, "TRAILER!!!") == 0) { free_hash(); return 0; } clean_path(collected, mode); if (S_ISREG(mode)) { int ml = maybe_link(); if (ml >= 0) { int openflags = O_WRONLY|O_CREAT; if (ml != 1) openflags |= O_TRUNC; wfd = sys_open(collected, openflags, mode); if (wfd >= 0) { sys_fchown(wfd, uid, gid); sys_fchmod(wfd, mode); if (body_len) sys_ftruncate(wfd, body_len); vcollected = kstrdup(collected, GFP_KERNEL); state = CopyFile; } } } else if (S_ISDIR(mode)) { sys_mkdir(collected, mode); sys_chown(collected, uid, gid); sys_chmod(collected, mode); dir_add(collected, mtime); } else if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { if (maybe_link() == 0) { sys_mknod(collected, mode, rdev); sys_chown(collected, uid, gid); sys_chmod(collected, mode); do_utime(collected, mtime); } } return 0; } static int __init do_copy(void) { if (count >= body_len) { sys_write(wfd, victim, body_len); sys_close(wfd); do_utime(vcollected, mtime); kfree(vcollected); eat(body_len); state = SkipIt; return 0; } else { sys_write(wfd, victim, count); body_len -= count; eat(count); return 1; } } static int __init do_symlink(void) { collected[N_ALIGN(name_len) + body_len] = '\0'; clean_path(collected, 0); sys_symlink(collected + N_ALIGN(name_len), collected); sys_lchown(collected, uid, gid); do_utime(collected, mtime); state = SkipIt; next_state = Reset; return 0; } static __initdata int (*actions[])(void) = { [Start] = do_start, [Collect] = do_collect, [GotHeader] = do_header, [SkipIt] = do_skip, [GotName] = do_name, [CopyFile] = do_copy, [GotSymlink] = do_symlink, [Reset] = do_reset, }; static int __init write_buffer(char *buf, unsigned len) 
{ count = len; victim = buf; while (!actions[state]()) ; return len - count; } static int __init flush_buffer(void *bufv, unsigned len) { char *buf = (char *) bufv; int written; int origLen = len; if (message) return -1; while ((written = write_buffer(buf, len)) < len && !message) { char c = buf[written]; if (c == '0') { buf += written; len -= written; state = Start; } else if (c == 0) { buf += written; len -= written; state = Reset; } else error("junk in compressed archive"); } return origLen; } static unsigned my_inptr; /* index of next byte to be processed in inbuf */ #include <linux/decompress/generic.h> static char * __init unpack_to_rootfs(char *buf, unsigned len) { int written, res; decompress_fn decompress; const char *compress_name; static __initdata char msg_buf[64]; header_buf = kmalloc(110, GFP_KERNEL); symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL); name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL); if (!header_buf || !symlink_buf || !name_buf) panic("can't allocate buffers"); state = Start; this_header = 0; message = NULL; while (!message && len) { loff_t saved_offset = this_header; if (*buf == '0' && !(this_header & 3)) { state = Start; written = write_buffer(buf, len); buf += written; len -= written; continue; } if (!*buf) { buf++; len--; this_header++; continue; } this_header = 0; decompress = decompress_method(buf, len, &compress_name); if (decompress) { res = decompress(buf, len, NULL, flush_buffer, NULL, &my_inptr, error); if (res) error("decompressor failed"); } else if (compress_name) { if (!message) { snprintf(msg_buf, sizeof msg_buf, "compression method %s not configured", compress_name); message = msg_buf; } } else error("junk in compressed archive"); if (state != Reset) error("junk in compressed archive"); this_header = saved_offset + my_inptr; buf += my_inptr; len -= my_inptr; } dir_utime(); kfree(name_buf); kfree(symlink_buf); kfree(header_buf); return message; } static int __initdata do_retain_initrd; static int 
__init retain_initrd_param(char *str) { if (*str) return 0; do_retain_initrd = 1; return 1; } __setup("retain_initrd", retain_initrd_param); extern char __initramfs_start[]; extern unsigned long __initramfs_size; #include <linux/initrd.h> #include <linux/kexec.h> static void __init free_initrd(void) { #ifdef CONFIG_KEXEC unsigned long crashk_start = (unsigned long)__va(crashk_res.start); unsigned long crashk_end = (unsigned long)__va(crashk_res.end); #endif if (do_retain_initrd) goto skip; #ifdef CONFIG_KEXEC /* * If the initrd region is overlapped with crashkernel reserved region, * free only memory that is not part of crashkernel region. */ if (initrd_start < crashk_end && initrd_end > crashk_start) { /* * Initialize initrd memory region since the kexec boot does * not do. */ memset((void *)initrd_start, 0, initrd_end - initrd_start); if (initrd_start < crashk_start) free_initrd_mem(initrd_start, crashk_start); if (initrd_end > crashk_end) free_initrd_mem(crashk_end, initrd_end); } else #endif free_initrd_mem(initrd_start, initrd_end); skip: initrd_start = 0; initrd_end = 0; } #ifdef CONFIG_BLK_DEV_RAM #define BUF_SIZE 1024 static void __init clean_rootfs(void) { int fd; void *buf; struct linux_dirent64 *dirp; int num; fd = sys_open("/", O_RDONLY, 0); WARN_ON(fd < 0); if (fd < 0) return; buf = kzalloc(BUF_SIZE, GFP_KERNEL); WARN_ON(!buf); if (!buf) { sys_close(fd); return; } dirp = buf; num = sys_getdents64(fd, dirp, BUF_SIZE); while (num > 0) { while (num > 0) { struct stat st; int ret; ret = sys_newlstat(dirp->d_name, &st); WARN_ON_ONCE(ret); if (!ret) { if (S_ISDIR(st.st_mode)) sys_rmdir(dirp->d_name); else sys_unlink(dirp->d_name); } num -= dirp->d_reclen; dirp = (void *)dirp + dirp->d_reclen; } dirp = buf; memset(buf, 0, BUF_SIZE); num = sys_getdents64(fd, dirp, BUF_SIZE); } sys_close(fd); kfree(buf); } #endif static int __init populate_rootfs(void) { char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size); if (err) panic(err); /* Failed to 
decompress INTERNAL initramfs */ if (initrd_start) { #ifdef CONFIG_BLK_DEV_RAM int fd; printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); if (!err) { free_initrd(); goto done; } else { clean_rootfs(); unpack_to_rootfs(__initramfs_start, __initramfs_size); } printk(KERN_INFO "rootfs image is not initramfs (%s)" "; looks like an initrd\n", err); fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); if (fd >= 0) { sys_write(fd, (char *)initrd_start, initrd_end - initrd_start); sys_close(fd); free_initrd(); } done: #else printk(KERN_INFO "Unpacking initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); if (err) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); free_initrd(); #endif /* * Try loading default modules from initramfs. This gives * us a chance to load before device_initcalls. */ load_default_modules(); } return 0; } rootfs_initcall(populate_rootfs);
gpl-2.0
shinru2004/N860_Kernel
sound/pci/ymfpci/ymfpci.c
3632
11114
/* * The driver for the Yamaha's DS1/DS1E cards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/ymfpci.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Yamaha DS-1 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF724}," "{Yamaha,YMF724F}," "{Yamaha,YMF740}," "{Yamaha,YMF740C}," "{Yamaha,YMF744}," "{Yamaha,YMF754}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static long fm_port[SNDRV_CARDS]; static long mpu_port[SNDRV_CARDS]; #ifdef SUPPORT_JOYSTICK static long joystick_port[SNDRV_CARDS]; #endif static int rear_switch[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the Yamaha DS-1 PCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the Yamaha DS-1 PCI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, 
"Enable Yamaha DS-1 soundcard."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 Port."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM OPL-3 Port."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, long, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address"); #endif module_param_array(rear_switch, bool, NULL, 0444); MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch"); static DEFINE_PCI_DEVICE_TABLE(snd_ymfpci_ids) = { { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */ { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */ { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */ { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */ { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */ { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_ymfpci_ids); #ifdef SUPPORT_JOYSTICK static int __devinit snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int legacy_ctrl, int legacy_ctrl2) { struct gameport *gp; struct resource *r = NULL; int io_port = joystick_port[dev]; if (!io_port) return -ENODEV; if (chip->pci->device >= 0x0010) { /* YMF 744/754 */ if (io_port == 1) { /* auto-detect */ if (!(io_port = pci_resource_start(chip->pci, 2))) return -ENODEV; } } else { if (io_port == 1) { /* auto-detect */ for (io_port = 0x201; io_port <= 0x205; io_port++) { if (io_port == 0x203) continue; if ((r = request_region(io_port, 1, "YMFPCI gameport")) != NULL) break; } if (!r) { printk(KERN_ERR "ymfpci: no gameport ports available\n"); return -EBUSY; } } switch (io_port) { case 0x201: legacy_ctrl2 |= 0 << 6; break; case 0x202: legacy_ctrl2 |= 1 << 6; break; case 0x204: legacy_ctrl2 |= 2 << 6; break; case 0x205: legacy_ctrl2 |= 3 << 6; break; default: printk(KERN_ERR "ymfpci: invalid joystick port %#x", io_port); return -EINVAL; } } if (!r && !(r = request_region(io_port, 1, "YMFPCI gameport"))) { printk(KERN_ERR "ymfpci: joystick port %#x is in use.\n", 
io_port); return -EBUSY; } chip->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "ymfpci: cannot allocate memory for gameport\n"); release_and_free_resource(r); return -ENOMEM; } gameport_set_name(gp, "Yamaha YMF Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci)); gameport_set_dev_parent(gp, &chip->pci->dev); gp->io = io_port; gameport_set_port_data(gp, r); if (chip->pci->device >= 0x0010) /* YMF 744/754 */ pci_write_config_word(chip->pci, PCIR_DSXG_JOYBASE, io_port); pci_write_config_word(chip->pci, PCIR_DSXG_LEGACY, legacy_ctrl | YMFPCI_LEGACY_JPEN); pci_write_config_word(chip->pci, PCIR_DSXG_ELEGACY, legacy_ctrl2); gameport_register_port(chip->gameport); return 0; } void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { if (chip->gameport) { struct resource *r = gameport_get_port_data(chip->gameport); gameport_unregister_port(chip->gameport); chip->gameport = NULL; release_and_free_resource(r); } } #else static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int l, int l2) { return -ENOSYS; } void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { } #endif /* SUPPORT_JOYSTICK */ static int __devinit snd_card_ymfpci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct resource *fm_res = NULL; struct resource *mpu_res = NULL; struct snd_ymfpci *chip; struct snd_opl3 *opl3; const char *str, *model; int err; u16 legacy_ctrl, legacy_ctrl2, old_legacy_ctrl; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci_id->device) { case 0x0004: str = "YMF724"; model = "DS-1"; break; case 0x000d: str = "YMF724F"; model = "DS-1"; break; case 0x000a: str = "YMF740"; model = "DS-1L"; break; case 0x000c: str = "YMF740C"; model = "DS-1L"; break; case 0x0010: str = "YMF744"; model = "DS-1S"; break; case 0x0012: str = 
"YMF754"; model = "DS-1E"; break; default: model = str = "???"; break; } legacy_ctrl = 0; legacy_ctrl2 = 0x0800; /* SBEN = 0, SMOD = 01, LAD = 0 */ if (pci_id->device >= 0x0010) { /* YMF 744/754 */ if (fm_port[dev] == 1) { /* auto-detect */ fm_port[dev] = pci_resource_start(pci, 1); } if (fm_port[dev] > 0 && (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_FMEN; pci_write_config_word(pci, PCIR_DSXG_FMBASE, fm_port[dev]); } if (mpu_port[dev] == 1) { /* auto-detect */ mpu_port[dev] = pci_resource_start(pci, 1) + 0x20; } if (mpu_port[dev] > 0 && (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_MEN; pci_write_config_word(pci, PCIR_DSXG_MPU401BASE, mpu_port[dev]); } } else { switch (fm_port[dev]) { case 0x388: legacy_ctrl2 |= 0; break; case 0x398: legacy_ctrl2 |= 1; break; case 0x3a0: legacy_ctrl2 |= 2; break; case 0x3a8: legacy_ctrl2 |= 3; break; default: fm_port[dev] = 0; break; } if (fm_port[dev] > 0 && (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_FMEN; } else { legacy_ctrl2 &= ~YMFPCI_LEGACY2_FMIO; fm_port[dev] = 0; } switch (mpu_port[dev]) { case 0x330: legacy_ctrl2 |= 0 << 4; break; case 0x300: legacy_ctrl2 |= 1 << 4; break; case 0x332: legacy_ctrl2 |= 2 << 4; break; case 0x334: legacy_ctrl2 |= 3 << 4; break; default: mpu_port[dev] = 0; break; } if (mpu_port[dev] > 0 && (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_MEN; } else { legacy_ctrl2 &= ~YMFPCI_LEGACY2_MPUIO; mpu_port[dev] = 0; } } if (mpu_res) { legacy_ctrl |= YMFPCI_LEGACY_MIEN; legacy_ctrl2 |= YMFPCI_LEGACY2_IMOD; } pci_read_config_word(pci, PCIR_DSXG_LEGACY, &old_legacy_ctrl); pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); pci_write_config_word(pci, PCIR_DSXG_ELEGACY, legacy_ctrl2); if ((err = snd_ymfpci_create(card, pci, old_legacy_ctrl, &chip)) < 0) { snd_card_free(card); 
release_and_free_resource(mpu_res); release_and_free_resource(fm_res); return err; } chip->fm_res = fm_res; chip->mpu_res = mpu_res; card->private_data = chip; strcpy(card->driver, str); sprintf(card->shortname, "Yamaha %s (%s)", model, str); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, chip->reg_area_phys, chip->irq); if ((err = snd_ymfpci_pcm(chip, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm_spdif(chip, 1, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm_4ch(chip, 2, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm2(chip, 3, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_mixer(chip, rear_switch[dev])) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_timer(chip, 0)) < 0) { snd_card_free(card); return err; } if (chip->mpu_res) { if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_YMFPCI, mpu_port[dev], MPU401_INFO_INTEGRATED, pci->irq, 0, &chip->rawmidi)) < 0) { printk(KERN_WARNING "ymfpci: cannot initialize MPU401 at 0x%lx, skipping...\n", mpu_port[dev]); legacy_ctrl &= ~YMFPCI_LEGACY_MIEN; /* disable MPU401 irq */ pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); } } if (chip->fm_res) { if ((err = snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_OPL3, 1, &opl3)) < 0) { printk(KERN_WARNING "ymfpci: cannot initialize FM OPL3 at 0x%lx, skipping...\n", fm_port[dev]); legacy_ctrl &= ~YMFPCI_LEGACY_FMEN; pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); } else if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); snd_printk(KERN_ERR "cannot create opl3 hwdep\n"); return err; } } snd_ymfpci_create_gameport(chip, dev, legacy_ctrl, legacy_ctrl2); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_card_ymfpci_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); 
pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "Yamaha DS-1 PCI", .id_table = snd_ymfpci_ids, .probe = snd_card_ymfpci_probe, .remove = __devexit_p(snd_card_ymfpci_remove), #ifdef CONFIG_PM .suspend = snd_ymfpci_suspend, .resume = snd_ymfpci_resume, #endif }; static int __init alsa_card_ymfpci_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_ymfpci_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_ymfpci_init) module_exit(alsa_card_ymfpci_exit)
gpl-2.0
garyd9/linux_kernel_GT-P6210
sound/pci/ymfpci/ymfpci.c
3632
11114
/* * The driver for the Yamaha's DS1/DS1E cards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/ymfpci.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Yamaha DS-1 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF724}," "{Yamaha,YMF724F}," "{Yamaha,YMF740}," "{Yamaha,YMF740C}," "{Yamaha,YMF744}," "{Yamaha,YMF754}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static long fm_port[SNDRV_CARDS]; static long mpu_port[SNDRV_CARDS]; #ifdef SUPPORT_JOYSTICK static long joystick_port[SNDRV_CARDS]; #endif static int rear_switch[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the Yamaha DS-1 PCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the Yamaha DS-1 PCI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, 
"Enable Yamaha DS-1 soundcard."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 Port."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM OPL-3 Port."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, long, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address"); #endif module_param_array(rear_switch, bool, NULL, 0444); MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch"); static DEFINE_PCI_DEVICE_TABLE(snd_ymfpci_ids) = { { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */ { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */ { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */ { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */ { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */ { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_ymfpci_ids); #ifdef SUPPORT_JOYSTICK static int __devinit snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int legacy_ctrl, int legacy_ctrl2) { struct gameport *gp; struct resource *r = NULL; int io_port = joystick_port[dev]; if (!io_port) return -ENODEV; if (chip->pci->device >= 0x0010) { /* YMF 744/754 */ if (io_port == 1) { /* auto-detect */ if (!(io_port = pci_resource_start(chip->pci, 2))) return -ENODEV; } } else { if (io_port == 1) { /* auto-detect */ for (io_port = 0x201; io_port <= 0x205; io_port++) { if (io_port == 0x203) continue; if ((r = request_region(io_port, 1, "YMFPCI gameport")) != NULL) break; } if (!r) { printk(KERN_ERR "ymfpci: no gameport ports available\n"); return -EBUSY; } } switch (io_port) { case 0x201: legacy_ctrl2 |= 0 << 6; break; case 0x202: legacy_ctrl2 |= 1 << 6; break; case 0x204: legacy_ctrl2 |= 2 << 6; break; case 0x205: legacy_ctrl2 |= 3 << 6; break; default: printk(KERN_ERR "ymfpci: invalid joystick port %#x", io_port); return -EINVAL; } } if (!r && !(r = request_region(io_port, 1, "YMFPCI gameport"))) { printk(KERN_ERR "ymfpci: joystick port %#x is in use.\n", 
io_port); return -EBUSY; } chip->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "ymfpci: cannot allocate memory for gameport\n"); release_and_free_resource(r); return -ENOMEM; } gameport_set_name(gp, "Yamaha YMF Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci)); gameport_set_dev_parent(gp, &chip->pci->dev); gp->io = io_port; gameport_set_port_data(gp, r); if (chip->pci->device >= 0x0010) /* YMF 744/754 */ pci_write_config_word(chip->pci, PCIR_DSXG_JOYBASE, io_port); pci_write_config_word(chip->pci, PCIR_DSXG_LEGACY, legacy_ctrl | YMFPCI_LEGACY_JPEN); pci_write_config_word(chip->pci, PCIR_DSXG_ELEGACY, legacy_ctrl2); gameport_register_port(chip->gameport); return 0; } void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { if (chip->gameport) { struct resource *r = gameport_get_port_data(chip->gameport); gameport_unregister_port(chip->gameport); chip->gameport = NULL; release_and_free_resource(r); } } #else static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int l, int l2) { return -ENOSYS; } void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { } #endif /* SUPPORT_JOYSTICK */ static int __devinit snd_card_ymfpci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct resource *fm_res = NULL; struct resource *mpu_res = NULL; struct snd_ymfpci *chip; struct snd_opl3 *opl3; const char *str, *model; int err; u16 legacy_ctrl, legacy_ctrl2, old_legacy_ctrl; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci_id->device) { case 0x0004: str = "YMF724"; model = "DS-1"; break; case 0x000d: str = "YMF724F"; model = "DS-1"; break; case 0x000a: str = "YMF740"; model = "DS-1L"; break; case 0x000c: str = "YMF740C"; model = "DS-1L"; break; case 0x0010: str = "YMF744"; model = "DS-1S"; break; case 0x0012: str = 
"YMF754"; model = "DS-1E"; break; default: model = str = "???"; break; } legacy_ctrl = 0; legacy_ctrl2 = 0x0800; /* SBEN = 0, SMOD = 01, LAD = 0 */ if (pci_id->device >= 0x0010) { /* YMF 744/754 */ if (fm_port[dev] == 1) { /* auto-detect */ fm_port[dev] = pci_resource_start(pci, 1); } if (fm_port[dev] > 0 && (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_FMEN; pci_write_config_word(pci, PCIR_DSXG_FMBASE, fm_port[dev]); } if (mpu_port[dev] == 1) { /* auto-detect */ mpu_port[dev] = pci_resource_start(pci, 1) + 0x20; } if (mpu_port[dev] > 0 && (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_MEN; pci_write_config_word(pci, PCIR_DSXG_MPU401BASE, mpu_port[dev]); } } else { switch (fm_port[dev]) { case 0x388: legacy_ctrl2 |= 0; break; case 0x398: legacy_ctrl2 |= 1; break; case 0x3a0: legacy_ctrl2 |= 2; break; case 0x3a8: legacy_ctrl2 |= 3; break; default: fm_port[dev] = 0; break; } if (fm_port[dev] > 0 && (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_FMEN; } else { legacy_ctrl2 &= ~YMFPCI_LEGACY2_FMIO; fm_port[dev] = 0; } switch (mpu_port[dev]) { case 0x330: legacy_ctrl2 |= 0 << 4; break; case 0x300: legacy_ctrl2 |= 1 << 4; break; case 0x332: legacy_ctrl2 |= 2 << 4; break; case 0x334: legacy_ctrl2 |= 3 << 4; break; default: mpu_port[dev] = 0; break; } if (mpu_port[dev] > 0 && (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_MEN; } else { legacy_ctrl2 &= ~YMFPCI_LEGACY2_MPUIO; mpu_port[dev] = 0; } } if (mpu_res) { legacy_ctrl |= YMFPCI_LEGACY_MIEN; legacy_ctrl2 |= YMFPCI_LEGACY2_IMOD; } pci_read_config_word(pci, PCIR_DSXG_LEGACY, &old_legacy_ctrl); pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); pci_write_config_word(pci, PCIR_DSXG_ELEGACY, legacy_ctrl2); if ((err = snd_ymfpci_create(card, pci, old_legacy_ctrl, &chip)) < 0) { snd_card_free(card); 
release_and_free_resource(mpu_res); release_and_free_resource(fm_res); return err; } chip->fm_res = fm_res; chip->mpu_res = mpu_res; card->private_data = chip; strcpy(card->driver, str); sprintf(card->shortname, "Yamaha %s (%s)", model, str); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, chip->reg_area_phys, chip->irq); if ((err = snd_ymfpci_pcm(chip, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm_spdif(chip, 1, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm_4ch(chip, 2, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm2(chip, 3, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_mixer(chip, rear_switch[dev])) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_timer(chip, 0)) < 0) { snd_card_free(card); return err; } if (chip->mpu_res) { if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_YMFPCI, mpu_port[dev], MPU401_INFO_INTEGRATED, pci->irq, 0, &chip->rawmidi)) < 0) { printk(KERN_WARNING "ymfpci: cannot initialize MPU401 at 0x%lx, skipping...\n", mpu_port[dev]); legacy_ctrl &= ~YMFPCI_LEGACY_MIEN; /* disable MPU401 irq */ pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); } } if (chip->fm_res) { if ((err = snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_OPL3, 1, &opl3)) < 0) { printk(KERN_WARNING "ymfpci: cannot initialize FM OPL3 at 0x%lx, skipping...\n", fm_port[dev]); legacy_ctrl &= ~YMFPCI_LEGACY_FMEN; pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); } else if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); snd_printk(KERN_ERR "cannot create opl3 hwdep\n"); return err; } } snd_ymfpci_create_gameport(chip, dev, legacy_ctrl, legacy_ctrl2); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_card_ymfpci_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); 
pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "Yamaha DS-1 PCI", .id_table = snd_ymfpci_ids, .probe = snd_card_ymfpci_probe, .remove = __devexit_p(snd_card_ymfpci_remove), #ifdef CONFIG_PM .suspend = snd_ymfpci_suspend, .resume = snd_ymfpci_resume, #endif }; static int __init alsa_card_ymfpci_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_ymfpci_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_ymfpci_init) module_exit(alsa_card_ymfpci_exit)
gpl-2.0
alanorth/samsung-kernel-p6200
sound/pci/vx222/vx222.c
3632
7678
/* * Driver for Digigram VX222 V2/Mic PCI soundcards * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/tlv.h> #include "vx222.h" #define CARD_NAME "VX222" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Digigram VX222 V2/Mic"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int mic[SNDRV_CARDS]; /* microphone */ static int ibl[SNDRV_CARDS]; /* microphone */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); module_param_array(mic, bool, NULL, 0444); MODULE_PARM_DESC(mic, "Enable Microphone."); 
module_param_array(ibl, int, NULL, 0444); MODULE_PARM_DESC(ibl, "Capture IBL size."); /* */ enum { VX_PCI_VX222_OLD, VX_PCI_VX222_NEW }; static DEFINE_PCI_DEVICE_TABLE(snd_vx222_ids) = { { 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, }, /* PLX */ { 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, }, /* PLX */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_vx222_ids); /* */ static const DECLARE_TLV_DB_SCALE(db_scale_old_vol, -11350, 50, 0); static const DECLARE_TLV_DB_SCALE(db_scale_akm, -7350, 50, 0); static struct snd_vx_hardware vx222_old_hw = { .name = "VX222/Old", .type = VX_TYPE_BOARD, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX_ANALOG_OUT_LEVEL_MAX, .output_level_db_scale = db_scale_old_vol, }; static struct snd_vx_hardware vx222_v2_hw = { .name = "VX222/v2", .type = VX_TYPE_V2, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; static struct snd_vx_hardware vx222_mic_hw = { .name = "VX222/Mic", .type = VX_TYPE_MIC, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; /* */ static int snd_vx222_free(struct vx_core *chip) { struct snd_vx222 *vx = (struct snd_vx222 *)chip; if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (vx->port[0]) pci_release_regions(vx->pci); pci_disable_device(vx->pci); kfree(chip); return 0; } static int snd_vx222_dev_free(struct snd_device *device) { struct vx_core *chip = device->device_data; return snd_vx222_free(chip); } static int __devinit snd_vx222_create(struct snd_card *card, struct pci_dev *pci, struct snd_vx_hardware *hw, struct snd_vx222 **rchip) { struct vx_core *chip; struct snd_vx222 *vx; int i, err; static struct snd_device_ops ops = { .dev_free = snd_vx222_dev_free, }; struct snd_vx_ops *vx_ops; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; 
pci_set_master(pci); vx_ops = hw->type == VX_TYPE_BOARD ? &vx222_old_ops : &vx222_ops; chip = snd_vx_create(card, hw, vx_ops, sizeof(struct snd_vx222) - sizeof(struct vx_core)); if (! chip) { pci_disable_device(pci); return -ENOMEM; } vx = (struct snd_vx222 *)chip; vx->pci = pci; if ((err = pci_request_regions(pci, CARD_NAME)) < 0) { snd_vx222_free(chip); return err; } for (i = 0; i < 2; i++) vx->port[i] = pci_resource_start(pci, i + 1); if (request_irq(pci->irq, snd_vx_irq_handler, IRQF_SHARED, CARD_NAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_vx222_free(chip); return -EBUSY; } chip->irq = pci->irq; if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_vx222_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = vx; return 0; } static int __devinit snd_vx222_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_vx_hardware *hw; struct snd_vx222 *vx; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch ((int)pci_id->driver_data) { case VX_PCI_VX222_OLD: hw = &vx222_old_hw; break; case VX_PCI_VX222_NEW: default: if (mic[dev]) hw = &vx222_mic_hw; else hw = &vx222_v2_hw; break; } if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) { snd_card_free(card); return err; } card->private_data = vx; vx->core.ibl.size = ibl[dev]; sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %i", card->shortname, vx->port[0], vx->port[1], vx->core.irq); snd_printdd("%s at 0x%lx & 0x%lx, irq %i\n", card->shortname, vx->port[0], vx->port[1], vx->core.irq); #ifdef SND_VX_FW_LOADER vx->core.dev = &pci->dev; #endif if ((err = snd_vx_setup_firmware(&vx->core)) < 0) { snd_card_free(card); return err; } if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } 
static void __devexit snd_vx222_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static int snd_vx222_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_vx222 *vx = card->private_data; int err; err = snd_vx_suspend(&vx->core, state); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return err; } static int snd_vx222_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_vx222 *vx = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "vx222: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); return snd_vx_resume(&vx->core); } #endif static struct pci_driver driver = { .name = "Digigram VX222", .id_table = snd_vx222_ids, .probe = snd_vx222_probe, .remove = __devexit_p(snd_vx222_remove), #ifdef CONFIG_PM .suspend = snd_vx222_suspend, .resume = snd_vx222_resume, #endif }; static int __init alsa_card_vx222_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_vx222_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_vx222_init) module_exit(alsa_card_vx222_exit)
gpl-2.0
R-M-S/RMS_DragunKernal_V.10-MAX-8-29-2012_3.0.42
drivers/usb/serial/safe_serial.c
4144
11581
/* * Safe Encapsulated USB Serial Driver * * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com> * Copyright (C) 2001 Lineo * Copyright (C) 2001 Hewlett-Packard * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * By: * Stuart Lynne <sl@lineo.com>, Tom Rushworth <tbr@lineo.com> */ /* * The encapsultaion is designed to overcome difficulties with some USB * hardware. * * While the USB protocol has a CRC over the data while in transit, i.e. while * being carried over the bus, there is no end to end protection. If the * hardware has any problems getting the data into or out of the USB transmit * and receive FIFO's then data can be lost. * * This protocol adds a two byte trailer to each USB packet to specify the * number of bytes of valid data and a 10 bit CRC that will allow the receiver * to verify that the entire USB packet was received without error. * * Because in this case the sender and receiver are the class and function * drivers there is now end to end protection. * * There is an additional option that can be used to force all transmitted * packets to be padded to the maximum packet size. This provides a work * around for some devices which have problems with small USB packets. * * Assuming a packetsize of N: * * 0..N-2 data and optional padding * * N-2 bits 7-2 - number of bytes of valid data * bits 1-0 top two bits of 10 bit CRC * N-1 bottom 8 bits of 10 bit CRC * * * | Data Length | 10 bit CRC | * + 7 . 6 . 5 . 4 . 3 . 2 . 1 . 0 | 7 . 6 . 5 . 4 . 3 . 2 . 1 . 0 + * * The 10 bit CRC is computed across the sent data, followed by the trailer * with the length set and the CRC set to zero. The CRC is then OR'd into * the trailer. * * When received a 10 bit CRC is computed over the entire frame including * the trailer and should be equal to zero. 
* * Two module parameters are used to control the encapsulation, if both are * turned of the module works as a simple serial device with NO * encapsulation. * * See linux/drivers/usbd/serial_fd for a device function driver * implementation of this. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #ifndef CONFIG_USB_SERIAL_SAFE_PADDED #define CONFIG_USB_SERIAL_SAFE_PADDED 0 #endif static int debug; static int safe = 1; static int padded = CONFIG_USB_SERIAL_SAFE_PADDED; #define DRIVER_VERSION "v0.1" #define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com, Johan Hovold <jhovold@gmail.com>" #define DRIVER_DESC "USB Safe Encapsulated Serial" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static __u16 vendor; /* no default */ static __u16 product; /* no default */ module_param(vendor, ushort, 0); MODULE_PARM_DESC(vendor, "User specified USB idVendor (required)"); module_param(product, ushort, 0); MODULE_PARM_DESC(product, "User specified USB idProduct (required)"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); module_param(safe, bool, 0); MODULE_PARM_DESC(safe, "Turn Safe Encapsulation On/Off"); module_param(padded, bool, 0); MODULE_PARM_DESC(padded, "Pad to full wMaxPacketSize On/Off"); #define CDC_DEVICE_CLASS 0x02 #define CDC_INTERFACE_CLASS 0x02 #define CDC_INTERFACE_SUBCLASS 0x06 #define LINEO_INTERFACE_CLASS 0xff #define LINEO_INTERFACE_SUBCLASS_SAFENET 0x01 #define LINEO_SAFENET_CRC 0x01 #define LINEO_SAFENET_CRC_PADDED 0x02 #define LINEO_INTERFACE_SUBCLASS_SAFESERIAL 0x02 #define LINEO_SAFESERIAL_CRC 0x01 #define LINEO_SAFESERIAL_CRC_PADDED 0x02 #define MY_USB_DEVICE(vend, prod, dc, ic, isc) \ .match_flags = 
USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_DEV_CLASS | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \ .idVendor = (vend), \ .idProduct = (prod),\ .bDeviceClass = (dc),\ .bInterfaceClass = (ic), \ .bInterfaceSubClass = (isc), static struct usb_device_id id_table[] = { {MY_USB_DEVICE(0x49f, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Itsy */ {MY_USB_DEVICE(0x3f0, 0x2101, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Calypso */ {MY_USB_DEVICE(0x4dd, 0x8001, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Iris */ {MY_USB_DEVICE(0x4dd, 0x8002, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */ {MY_USB_DEVICE(0x4dd, 0x8003, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */ {MY_USB_DEVICE(0x4dd, 0x8004, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */ {MY_USB_DEVICE(0x5f9, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Sharp tmp */ /* extra null entry for module vendor/produc parameters */ {MY_USB_DEVICE(0, 0, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver safe_driver = { .name = "safe_serial", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table, .no_dynamic_id = 1, }; static const __u16 crc10_table[256] = { 0x000, 0x233, 0x255, 0x066, 0x299, 0x0aa, 0x0cc, 0x2ff, 0x301, 0x132, 0x154, 0x367, 0x198, 0x3ab, 0x3cd, 0x1fe, 0x031, 0x202, 0x264, 0x057, 0x2a8, 0x09b, 0x0fd, 0x2ce, 0x330, 0x103, 0x165, 0x356, 0x1a9, 0x39a, 0x3fc, 0x1cf, 0x062, 0x251, 0x237, 0x004, 0x2fb, 0x0c8, 0x0ae, 0x29d, 0x363, 0x150, 0x136, 0x305, 0x1fa, 0x3c9, 0x3af, 0x19c, 0x053, 0x260, 0x206, 0x035, 0x2ca, 0x0f9, 0x09f, 0x2ac, 0x352, 
0x161, 0x107, 0x334, 0x1cb, 0x3f8, 0x39e, 0x1ad, 0x0c4, 0x2f7, 0x291, 0x0a2, 0x25d, 0x06e, 0x008, 0x23b, 0x3c5, 0x1f6, 0x190, 0x3a3, 0x15c, 0x36f, 0x309, 0x13a, 0x0f5, 0x2c6, 0x2a0, 0x093, 0x26c, 0x05f, 0x039, 0x20a, 0x3f4, 0x1c7, 0x1a1, 0x392, 0x16d, 0x35e, 0x338, 0x10b, 0x0a6, 0x295, 0x2f3, 0x0c0, 0x23f, 0x00c, 0x06a, 0x259, 0x3a7, 0x194, 0x1f2, 0x3c1, 0x13e, 0x30d, 0x36b, 0x158, 0x097, 0x2a4, 0x2c2, 0x0f1, 0x20e, 0x03d, 0x05b, 0x268, 0x396, 0x1a5, 0x1c3, 0x3f0, 0x10f, 0x33c, 0x35a, 0x169, 0x188, 0x3bb, 0x3dd, 0x1ee, 0x311, 0x122, 0x144, 0x377, 0x289, 0x0ba, 0x0dc, 0x2ef, 0x010, 0x223, 0x245, 0x076, 0x1b9, 0x38a, 0x3ec, 0x1df, 0x320, 0x113, 0x175, 0x346, 0x2b8, 0x08b, 0x0ed, 0x2de, 0x021, 0x212, 0x274, 0x047, 0x1ea, 0x3d9, 0x3bf, 0x18c, 0x373, 0x140, 0x126, 0x315, 0x2eb, 0x0d8, 0x0be, 0x28d, 0x072, 0x241, 0x227, 0x014, 0x1db, 0x3e8, 0x38e, 0x1bd, 0x342, 0x171, 0x117, 0x324, 0x2da, 0x0e9, 0x08f, 0x2bc, 0x043, 0x270, 0x216, 0x025, 0x14c, 0x37f, 0x319, 0x12a, 0x3d5, 0x1e6, 0x180, 0x3b3, 0x24d, 0x07e, 0x018, 0x22b, 0x0d4, 0x2e7, 0x281, 0x0b2, 0x17d, 0x34e, 0x328, 0x11b, 0x3e4, 0x1d7, 0x1b1, 0x382, 0x27c, 0x04f, 0x029, 0x21a, 0x0e5, 0x2d6, 0x2b0, 0x083, 0x12e, 0x31d, 0x37b, 0x148, 0x3b7, 0x184, 0x1e2, 0x3d1, 0x22f, 0x01c, 0x07a, 0x249, 0x0b6, 0x285, 0x2e3, 0x0d0, 0x11f, 0x32c, 0x34a, 0x179, 0x386, 0x1b5, 0x1d3, 0x3e0, 0x21e, 0x02d, 0x04b, 0x278, 0x087, 0x2b4, 0x2d2, 0x0e1, }; #define CRC10_INITFCS 0x000 /* Initial FCS value */ #define CRC10_GOODFCS 0x000 /* Good final FCS value */ #define CRC10_FCS(fcs, c) ((((fcs) << 8) & 0x3ff) ^ crc10_table[((fcs) >> 2) & 0xff] ^ (c)) /** * fcs_compute10 - memcpy and calculate 10 bit CRC across buffer * @sp: pointer to buffer * @len: number of bytes * @fcs: starting FCS * * Perform a memcpy and calculate fcs using ppp 10bit CRC algorithm. Return * new 10 bit FCS. 
*/ static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs) { for (; len-- > 0; fcs = CRC10_FCS(fcs, *sp++)); return fcs; } static void safe_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; unsigned char length = urb->actual_length; int actual_length; struct tty_struct *tty; __u16 fcs; if (!length) return; tty = tty_port_tty_get(&port->port); if (!tty) return; if (!safe) goto out; fcs = fcs_compute10(data, length, CRC10_INITFCS); if (fcs) { dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); goto err; } actual_length = data[length - 2] >> 2; if (actual_length > (length - 2)) { dev_err(&port->dev, "%s - inconsistent lengths %d:%d\n", __func__, actual_length, length); goto err; } dev_info(&urb->dev->dev, "%s - actual: %d\n", __func__, actual_length); length = actual_length; out: tty_insert_flip_string(tty, data, length); tty_flip_buffer_push(tty); err: tty_kref_put(tty); } static int safe_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size) { unsigned char *buf = dest; int count; int trailer_len; int pkt_len; __u16 fcs; trailer_len = safe ? 
2 : 0; count = kfifo_out_locked(&port->write_fifo, buf, size - trailer_len, &port->lock); if (!safe) return count; /* pad if necessary */ if (padded) { pkt_len = size; memset(buf + count, '0', pkt_len - count - trailer_len); } else { pkt_len = count + trailer_len; } /* set count */ buf[pkt_len - 2] = count << 2; buf[pkt_len - 1] = 0; /* compute fcs and insert into trailer */ fcs = fcs_compute10(buf, pkt_len, CRC10_INITFCS); buf[pkt_len - 2] |= fcs >> 8; buf[pkt_len - 1] |= fcs & 0xff; return pkt_len; } static int safe_startup(struct usb_serial *serial) { switch (serial->interface->cur_altsetting->desc.bInterfaceProtocol) { case LINEO_SAFESERIAL_CRC: break; case LINEO_SAFESERIAL_CRC_PADDED: padded = 1; break; default: return -EINVAL; } return 0; } static struct usb_serial_driver safe_device = { .driver = { .owner = THIS_MODULE, .name = "safe_serial", }, .id_table = id_table, .usb_driver = &safe_driver, .num_ports = 1, .process_read_urb = safe_process_read_urb, .prepare_write_buffer = safe_prepare_write_buffer, .attach = safe_startup, }; static int __init safe_init(void) { int i, retval; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); /* if we have vendor / product parameters patch them into id list */ if (vendor || product) { printk(KERN_INFO KBUILD_MODNAME ": vendor: %x product: %x\n", vendor, product); for (i = 0; i < ARRAY_SIZE(id_table); i++) { if (!id_table[i].idVendor && !id_table[i].idProduct) { id_table[i].idVendor = vendor; id_table[i].idProduct = product; break; } } } retval = usb_serial_register(&safe_device); if (retval) goto failed_usb_serial_register; retval = usb_register(&safe_driver); if (retval) goto failed_usb_register; return 0; failed_usb_register: usb_serial_deregister(&safe_device); failed_usb_serial_register: return retval; } static void __exit safe_exit(void) { usb_deregister(&safe_driver); usb_serial_deregister(&safe_device); } module_init(safe_init); module_exit(safe_exit);
gpl-2.0
RepoBackups/kernel_lge_g3
arch/s390/mm/maccess.c
4400
3785
/* * Access kernel memory without faulting -- s390 specific implementation. * * Copyright IBM Corp. 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * */ #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/gfp.h> #include <asm/ctl_reg.h> /* * This function writes to kernel memory bypassing DAT and possible * write protection. It copies one to four bytes from src to dst * using the stura instruction. * Returns the number of bytes copied or -EFAULT. */ static long probe_kernel_write_odd(void *dst, const void *src, size_t size) { unsigned long count, aligned; int offset, mask; int rc = -EFAULT; aligned = (unsigned long) dst & ~3UL; offset = (unsigned long) dst & 3; count = min_t(unsigned long, 4 - offset, size); mask = (0xf << (4 - count)) & 0xf; mask >>= offset; asm volatile( " bras 1,0f\n" " icm 0,0,0(%3)\n" "0: l 0,0(%1)\n" " lra %1,0(%1)\n" "1: ex %2,0(1)\n" "2: stura 0,%1\n" " la %0,0\n" "3:\n" EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b) : "+d" (rc), "+a" (aligned) : "a" (mask), "a" (src) : "cc", "memory", "0", "1"); return rc ? rc : count; } long probe_kernel_write(void *dst, const void *src, size_t size) { long copied = 0; while (size) { copied = probe_kernel_write_odd(dst, src, size); if (copied < 0) break; dst += copied; src += copied; size -= copied; } return copied < 0 ? 
-EFAULT : 0; } static int __memcpy_real(void *dest, void *src, size_t count) { register unsigned long _dest asm("2") = (unsigned long) dest; register unsigned long _len1 asm("3") = (unsigned long) count; register unsigned long _src asm("4") = (unsigned long) src; register unsigned long _len2 asm("5") = (unsigned long) count; int rc = -EFAULT; asm volatile ( "0: mvcle %1,%2,0x0\n" "1: jo 0b\n" " lhi %0,0x0\n" "2:\n" EX_TABLE(1b,2b) : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1), "+d" (_len2), "=m" (*((long *) dest)) : "m" (*((long *) src)) : "cc", "memory"); return rc; } /* * Copy memory in real mode (kernel to kernel) */ int memcpy_real(void *dest, void *src, size_t count) { unsigned long flags; int rc; if (!count) return 0; local_irq_save(flags); __arch_local_irq_stnsm(0xfbUL); rc = __memcpy_real(dest, src, count); local_irq_restore(flags); return rc; } /* * Copy memory to absolute zero */ void copy_to_absolute_zero(void *dest, void *src, size_t count) { unsigned long cr0; BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore)); preempt_disable(); __ctl_store(cr0, 0, 0); __ctl_clear_bit(0, 28); /* disable lowcore protection */ memcpy_real(dest + store_prefix(), src, count); __ctl_load(cr0, 0, 0); preempt_enable(); } /* * Copy memory from kernel (real) to user (virtual) */ int copy_to_user_real(void __user *dest, void *src, size_t count) { int offs = 0, size, rc; char *buf; buf = (char *) __get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; rc = -EFAULT; while (offs < count) { size = min(PAGE_SIZE, count - offs); if (memcpy_real(buf, src + offs, size)) goto out; if (copy_to_user(dest + offs, buf, size)) goto out; offs += size; } rc = 0; out: free_page((unsigned long) buf); return rc; } /* * Copy memory from user (virtual) to kernel (real) */ int copy_from_user_real(void *dest, void __user *src, size_t count) { int offs = 0, size, rc; char *buf; buf = (char *) __get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; rc = -EFAULT; while (offs < count) { 
size = min(PAGE_SIZE, count - offs); if (copy_from_user(buf, src + offs, size)) goto out; if (memcpy_real(dest + offs, buf, size)) goto out; offs += size; } rc = 0; out: free_page((unsigned long) buf); return rc; }
gpl-2.0
vm03/android_kernel_lge_msm8610
drivers/video/via/via_i2c.c
9776
7263
/* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/via-core.h> #include <linux/via_i2c.h> /* * There can only be one set of these, so there's no point in having * them be dynamically allocated... 
*/ #define VIAFB_NUM_I2C 5 static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C]; static struct viafb_dev *i2c_vdev; /* Passed in from core */ static void via_i2c_setscl(void *data, int state) { u8 val; struct via_port_cfg *adap_data = data; unsigned long flags; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0; if (state) val |= 0x20; else val &= ~0x20; switch (adap_data->type) { case VIA_PORT_I2C: val |= 0x01; break; case VIA_PORT_GPIO: val |= 0x82; break; default: printk(KERN_ERR "viafb_i2c: specify wrong i2c type.\n"); } via_write_reg(adap_data->io_port, adap_data->ioport_index, val); spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); } static int via_i2c_getscl(void *data) { struct via_port_cfg *adap_data = data; unsigned long flags; int ret = 0; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); if (adap_data->type == VIA_PORT_GPIO) via_write_reg_mask(adap_data->io_port, adap_data->ioport_index, 0, 0x80); if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x08) ret = 1; spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); return ret; } static int via_i2c_getsda(void *data) { struct via_port_cfg *adap_data = data; unsigned long flags; int ret = 0; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); if (adap_data->type == VIA_PORT_GPIO) via_write_reg_mask(adap_data->io_port, adap_data->ioport_index, 0, 0x40); if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x04) ret = 1; spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); return ret; } static void via_i2c_setsda(void *data, int state) { u8 val; struct via_port_cfg *adap_data = data; unsigned long flags; spin_lock_irqsave(&i2c_vdev->reg_lock, flags); val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0; if (state) val |= 0x10; else val &= ~0x10; switch (adap_data->type) { case VIA_PORT_I2C: val |= 0x01; break; case VIA_PORT_GPIO: val |= 0x42; break; default: printk(KERN_ERR "viafb_i2c: specify wrong i2c 
type.\n"); } via_write_reg(adap_data->io_port, adap_data->ioport_index, val); spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags); } int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata) { int ret; u8 mm1[] = {0x00}; struct i2c_msg msgs[2]; if (!via_i2c_par[adap].is_active) return -ENODEV; *pdata = 0; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = slave_addr / 2; mm1[0] = index; msgs[0].len = 1; msgs[1].len = 1; msgs[0].buf = mm1; msgs[1].buf = pdata; ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); if (ret == 2) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data) { int ret; u8 msg[2] = { index, data }; struct i2c_msg msgs; if (!via_i2c_par[adap].is_active) return -ENODEV; msgs.flags = 0; msgs.addr = slave_addr / 2; msgs.len = 2; msgs.buf = msg; ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1); if (ret == 1) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len) { int ret; u8 mm1[] = {0x00}; struct i2c_msg msgs[2]; if (!via_i2c_par[adap].is_active) return -ENODEV; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = slave_addr / 2; mm1[0] = index; msgs[0].len = 1; msgs[1].len = buff_len; msgs[0].buf = mm1; msgs[1].buf = buff; ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); if (ret == 2) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } /* * Allow other viafb subdevices to look up a specific adapter * by port name. 
*/ struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which) { struct via_i2c_stuff *stuff = &via_i2c_par[which]; return &stuff->adapter; } EXPORT_SYMBOL_GPL(viafb_find_i2c_adapter); static int create_i2c_bus(struct i2c_adapter *adapter, struct i2c_algo_bit_data *algo, struct via_port_cfg *adap_cfg, struct pci_dev *pdev) { algo->setsda = via_i2c_setsda; algo->setscl = via_i2c_setscl; algo->getsda = via_i2c_getsda; algo->getscl = via_i2c_getscl; algo->udelay = 10; algo->timeout = 2; algo->data = adap_cfg; sprintf(adapter->name, "viafb i2c io_port idx 0x%02x", adap_cfg->ioport_index); adapter->owner = THIS_MODULE; adapter->class = I2C_CLASS_DDC; adapter->algo_data = algo; if (pdev) adapter->dev.parent = &pdev->dev; else adapter->dev.parent = NULL; /* i2c_set_adapdata(adapter, adap_cfg); */ /* Raise SCL and SDA */ via_i2c_setsda(adap_cfg, 1); via_i2c_setscl(adap_cfg, 1); udelay(20); return i2c_bit_add_bus(adapter); } static int viafb_i2c_probe(struct platform_device *platdev) { int i, ret; struct via_port_cfg *configs; i2c_vdev = platdev->dev.platform_data; configs = i2c_vdev->port_cfg; for (i = 0; i < VIAFB_NUM_PORTS; i++) { struct via_port_cfg *adap_cfg = configs++; struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i]; i2c_stuff->is_active = 0; if (adap_cfg->type == 0 || adap_cfg->mode != VIA_MODE_I2C) continue; ret = create_i2c_bus(&i2c_stuff->adapter, &i2c_stuff->algo, adap_cfg, NULL); /* FIXME: PCIDEV */ if (ret < 0) { printk(KERN_ERR "viafb: cannot create i2c bus %u:%d\n", i, ret); continue; /* Still try to make the rest */ } i2c_stuff->is_active = 1; } return 0; } static int viafb_i2c_remove(struct platform_device *platdev) { int i; for (i = 0; i < VIAFB_NUM_PORTS; i++) { struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i]; /* * Only remove those entries in the array that we've * actually used (and thus initialized algo_data) */ if (i2c_stuff->is_active) i2c_del_adapter(&i2c_stuff->adapter); } return 0; } static struct platform_driver via_i2c_driver = { 
.driver = { .name = "viafb-i2c", }, .probe = viafb_i2c_probe, .remove = viafb_i2c_remove, }; int viafb_i2c_init(void) { return platform_driver_register(&via_i2c_driver); } void viafb_i2c_exit(void) { platform_driver_unregister(&via_i2c_driver); }
gpl-2.0
DennisBold/CodeAurora-MSM-Kernel
drivers/staging/rtl8192u/ieee80211/michael_mic.c
12080
3669
/* * Cryptographic API * * Michael MIC (IEEE 802.11i/TKIP) keyed digest * * Copyright (c) 2004 Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> //#include <linux/crypto.h> #include "rtl_crypto.h" struct michael_mic_ctx { u8 pending[4]; size_t pending_len; u32 l, r; }; static inline u32 rotl(u32 val, int bits) { return (val << bits) | (val >> (32 - bits)); } static inline u32 rotr(u32 val, int bits) { return (val >> bits) | (val << (32 - bits)); } static inline u32 xswap(u32 val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); } #define michael_block(l, r) \ do { \ r ^= rotl(l, 17); \ l += r; \ r ^= xswap(l); \ l += r; \ r ^= rotl(l, 3); \ l += r; \ r ^= rotr(l, 2); \ l += r; \ } while (0) static inline u32 get_le32(const u8 *p) { return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); } static inline void put_le32(u8 *p, u32 v) { p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24; } static void michael_init(void *ctx) { struct michael_mic_ctx *mctx = ctx; mctx->pending_len = 0; } static void michael_update(void *ctx, const u8 *data, unsigned int len) { struct michael_mic_ctx *mctx = ctx; if (mctx->pending_len) { int flen = 4 - mctx->pending_len; if (flen > len) flen = len; memcpy(&mctx->pending[mctx->pending_len], data, flen); mctx->pending_len += flen; data += flen; len -= flen; if (mctx->pending_len < 4) return; mctx->l ^= get_le32(mctx->pending); michael_block(mctx->l, mctx->r); mctx->pending_len = 0; } while (len >= 4) { mctx->l ^= get_le32(data); michael_block(mctx->l, mctx->r); data += 4; len -= 4; } if (len > 0) { mctx->pending_len = len; memcpy(mctx->pending, data, len); } } static void michael_final(void *ctx, u8 *out) { struct michael_mic_ctx *mctx = ctx; u8 *data = mctx->pending; /* 
Last block and padding (0x5a, 4..7 x 0) */ switch (mctx->pending_len) { case 0: mctx->l ^= 0x5a; break; case 1: mctx->l ^= data[0] | 0x5a00; break; case 2: mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000; break; case 3: mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) | 0x5a000000; break; } michael_block(mctx->l, mctx->r); /* l ^= 0; */ michael_block(mctx->l, mctx->r); put_le32(out, mctx->l); put_le32(out + 4, mctx->r); } static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) { struct michael_mic_ctx *mctx = ctx; if (keylen != 8) { if (flags) *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } mctx->l = get_le32(key); mctx->r = get_le32(key + 4); return 0; } static struct crypto_alg michael_mic_alg = { .cra_name = "michael_mic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = 8, .cra_ctxsize = sizeof(struct michael_mic_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list), .cra_u = { .digest = { .dia_digestsize = 8, .dia_init = michael_init, .dia_update = michael_update, .dia_final = michael_final, .dia_setkey = michael_setkey } } }; static int __init michael_mic_init(void) { return crypto_register_alg(&michael_mic_alg); } static void __exit michael_mic_exit(void) { crypto_unregister_alg(&michael_mic_alg); } module_init(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Michael MIC"); MODULE_AUTHOR("Jouni Malinen <jkmaline@cc.hut.fi>");
gpl-2.0
GladeRom/android_kernel_lge_g3
drivers/staging/rtl8192u/ieee80211/michael_mic.c
12080
3669
/* * Cryptographic API * * Michael MIC (IEEE 802.11i/TKIP) keyed digest * * Copyright (c) 2004 Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> //#include <linux/crypto.h> #include "rtl_crypto.h" struct michael_mic_ctx { u8 pending[4]; size_t pending_len; u32 l, r; }; static inline u32 rotl(u32 val, int bits) { return (val << bits) | (val >> (32 - bits)); } static inline u32 rotr(u32 val, int bits) { return (val >> bits) | (val << (32 - bits)); } static inline u32 xswap(u32 val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); } #define michael_block(l, r) \ do { \ r ^= rotl(l, 17); \ l += r; \ r ^= xswap(l); \ l += r; \ r ^= rotl(l, 3); \ l += r; \ r ^= rotr(l, 2); \ l += r; \ } while (0) static inline u32 get_le32(const u8 *p) { return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); } static inline void put_le32(u8 *p, u32 v) { p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24; } static void michael_init(void *ctx) { struct michael_mic_ctx *mctx = ctx; mctx->pending_len = 0; } static void michael_update(void *ctx, const u8 *data, unsigned int len) { struct michael_mic_ctx *mctx = ctx; if (mctx->pending_len) { int flen = 4 - mctx->pending_len; if (flen > len) flen = len; memcpy(&mctx->pending[mctx->pending_len], data, flen); mctx->pending_len += flen; data += flen; len -= flen; if (mctx->pending_len < 4) return; mctx->l ^= get_le32(mctx->pending); michael_block(mctx->l, mctx->r); mctx->pending_len = 0; } while (len >= 4) { mctx->l ^= get_le32(data); michael_block(mctx->l, mctx->r); data += 4; len -= 4; } if (len > 0) { mctx->pending_len = len; memcpy(mctx->pending, data, len); } } static void michael_final(void *ctx, u8 *out) { struct michael_mic_ctx *mctx = ctx; u8 *data = mctx->pending; /* 
Last block and padding (0x5a, 4..7 x 0) */ switch (mctx->pending_len) { case 0: mctx->l ^= 0x5a; break; case 1: mctx->l ^= data[0] | 0x5a00; break; case 2: mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000; break; case 3: mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) | 0x5a000000; break; } michael_block(mctx->l, mctx->r); /* l ^= 0; */ michael_block(mctx->l, mctx->r); put_le32(out, mctx->l); put_le32(out + 4, mctx->r); } static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) { struct michael_mic_ctx *mctx = ctx; if (keylen != 8) { if (flags) *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } mctx->l = get_le32(key); mctx->r = get_le32(key + 4); return 0; } static struct crypto_alg michael_mic_alg = { .cra_name = "michael_mic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = 8, .cra_ctxsize = sizeof(struct michael_mic_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list), .cra_u = { .digest = { .dia_digestsize = 8, .dia_init = michael_init, .dia_update = michael_update, .dia_final = michael_final, .dia_setkey = michael_setkey } } }; static int __init michael_mic_init(void) { return crypto_register_alg(&michael_mic_alg); } static void __exit michael_mic_exit(void) { crypto_unregister_alg(&michael_mic_alg); } module_init(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Michael MIC"); MODULE_AUTHOR("Jouni Malinen <jkmaline@cc.hut.fi>");
gpl-2.0
niamster/uboot-marvell-openrd-ultimate
cpu/ppc4xx/commproc.c
49
1843
/* * (C) Copyright 2000-2004 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * * * Atapted for ppc4XX by Denis Peter */ #include <common.h> #include <commproc.h> #if defined(CONFIG_POST) || defined(CONFIG_LOGBUFFER) void post_word_store (ulong a) { volatile void *save_addr = (volatile void *)(CFG_OCM_DATA_ADDR + CFG_POST_WORD_ADDR); *(volatile ulong *) save_addr = a; } ulong post_word_load (void) { volatile void *save_addr = (volatile void *)(CFG_OCM_DATA_ADDR + CFG_POST_WORD_ADDR); return *(volatile ulong *) save_addr; } #endif /* CONFIG_POST || CONFIG_LOGBUFFER*/ #ifdef CONFIG_BOOTCOUNT_LIMIT void bootcount_store (ulong a) { volatile ulong *save_addr = (volatile ulong *)(CFG_OCM_DATA_ADDR + CFG_BOOTCOUNT_ADDR); save_addr[0] = a; save_addr[1] = BOOTCOUNT_MAGIC; } ulong bootcount_load (void) { volatile ulong *save_addr = (volatile ulong *)(CFG_OCM_DATA_ADDR + CFG_BOOTCOUNT_ADDR); if (save_addr[1] != BOOTCOUNT_MAGIC) return 0; else return save_addr[0]; } #endif /* CONFIG_BOOTCOUNT_LIMIT */
gpl-2.0
Cpasjuste/ioquake3
code/tools/lcc/cpp/macro.c
49
10684
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "cpp.h" /* * do a macro definition. tp points to the name being defined in the line */ void dodefine(Tokenrow *trp) { Token *tp; Nlist *np; Tokenrow *def, *args; tp = trp->tp+1; if (tp>=trp->lp || tp->type!=NAME) { error(ERROR, "#defined token is not a name"); return; } np = lookup(tp, 1); if (np->flag&ISUNCHANGE) { error(ERROR, "#defined token %t can't be redefined", tp); return; } /* collect arguments */ tp += 1; args = NULL; if (tp<trp->lp && tp->type==LP && tp->wslen==0) { /* macro with args */ int narg = 0; tp += 1; args = new(Tokenrow); maketokenrow(2, args); if (tp->type!=RP) { int err = 0; for (;;) { Token *atp; if (tp->type!=NAME) { err++; break; } if (narg>=args->max) growtokenrow(args); for (atp=args->bp; atp<args->lp; atp++) if (atp->len==tp->len && strncmp((char*)atp->t, (char*)tp->t, tp->len)==0) error(ERROR, "Duplicate macro argument"); *args->lp++ = *tp; narg++; tp += 1; if (tp->type==RP) break; if (tp->type!=COMMA) { err++; break; } tp += 1; } if (err) { error(ERROR, "Syntax error in macro parameters"); return; } } tp += 1; } trp->tp = tp; if (((trp->lp)-1)->type==NL) trp->lp -= 1; def = normtokenrow(trp); if (np->flag&ISDEFINED) { if (comparetokens(def, np->vp) || (np->ap==NULL) != (args==NULL) || (np->ap && comparetokens(args, np->ap))) error(ERROR, "Macro redefinition of %t", trp->bp+2); } if (args) { Tokenrow *tap; tap = normtokenrow(args); dofree(args->bp); args = tap; } np->ap = args; np->vp = def; np->flag |= ISDEFINED; } /* * Definition received via -D or -U */ void doadefine(Tokenrow *trp, int type) { Nlist *np; static Token onetoken[1] = {{ NUMBER, 0, 0, 0, 1, (uchar*)"1" }}; static Tokenrow onetr = { onetoken, onetoken, onetoken+1, 1 }; trp->tp = trp->bp; if (type=='U') { if (trp->lp-trp->tp != 2 || trp->tp->type!=NAME) goto syntax; if ((np = lookup(trp->tp, 0)) == NULL) return; np->flag &= ~ISDEFINED; return; } if (trp->tp >= trp->lp || trp->tp->type!=NAME) goto syntax; 
np = lookup(trp->tp, 1); np->flag |= ISDEFINED; trp->tp += 1; if (trp->tp >= trp->lp || trp->tp->type==END) { np->vp = &onetr; return; } if (trp->tp->type!=ASGN) goto syntax; trp->tp += 1; if ((trp->lp-1)->type == END) trp->lp -= 1; np->vp = normtokenrow(trp); return; syntax: error(FATAL, "Illegal -D or -U argument %r", trp); } /* * Do macro expansion in a row of tokens. * Flag is NULL if more input can be gathered. */ void expandrow(Tokenrow *trp, char *flag) { Token *tp; Nlist *np; if (flag) setsource(flag, -1, ""); for (tp = trp->tp; tp<trp->lp; ) { if (tp->type!=NAME || quicklook(tp->t[0], tp->len>1?tp->t[1]:0)==0 || (np = lookup(tp, 0))==NULL || (np->flag&(ISDEFINED|ISMAC))==0 || (tp->hideset && checkhideset(tp->hideset, np))) { tp++; continue; } trp->tp = tp; if (np->val==KDEFINED) { tp->type = DEFINED; if ((tp+1)<trp->lp && (tp+1)->type==NAME) (tp+1)->type = NAME1; else if ((tp+3)<trp->lp && (tp+1)->type==LP && (tp+2)->type==NAME && (tp+3)->type==RP) (tp+2)->type = NAME1; else error(ERROR, "Incorrect syntax for `defined'"); tp++; continue; } if (np->flag&ISMAC) builtin(trp, np->val); else { expand(trp, np); } tp = trp->tp; } if (flag) unsetsource(); } /* * Expand the macro whose name is np, at token trp->tp, in the tokenrow. 
* Return trp->tp at the first token next to be expanded * (ordinarily the beginning of the expansion) */ void expand(Tokenrow *trp, Nlist *np) { Tokenrow ntr; int ntokc, narg, i; Token *tp; Tokenrow *atr[NARG+1]; int hs; copytokenrow(&ntr, np->vp); /* copy macro value */ if (np->ap==NULL) /* parameterless */ ntokc = 1; else { ntokc = gatherargs(trp, atr, &narg); if (narg<0) { /* not actually a call (no '(') */ trp->tp++; return; } if (narg != rowlen(np->ap)) { error(ERROR, "Disagreement in number of macro arguments"); trp->tp->hideset = newhideset(trp->tp->hideset, np); trp->tp += ntokc; return; } substargs(np, &ntr, atr); /* put args into replacement */ for (i=0; i<narg; i++) { dofree(atr[i]->bp); dofree(atr[i]); } } doconcat(&ntr); /* execute ## operators */ hs = newhideset(trp->tp->hideset, np); for (tp=ntr.bp; tp<ntr.lp; tp++) { /* distribute hidesets */ if (tp->type==NAME) { if (tp->hideset==0) tp->hideset = hs; else tp->hideset = unionhideset(tp->hideset, hs); } } ntr.tp = ntr.bp; insertrow(trp, ntokc, &ntr); trp->tp -= rowlen(&ntr); dofree(ntr.bp); return; } /* * Gather an arglist, starting in trp with tp pointing at the macro name. * Return total number of tokens passed, stash number of args found. * trp->tp is not changed relative to the tokenrow. 
*/ int gatherargs(Tokenrow *trp, Tokenrow **atr, int *narg) { int parens = 1; int ntok = 0; Token *bp, *lp; Tokenrow ttr; int ntokp; int needspace; *narg = -1; /* means that there is no macro call */ /* look for the ( */ for (;;) { trp->tp++; ntok++; if (trp->tp >= trp->lp) { gettokens(trp, 0); if ((trp->lp-1)->type==END) { trp->lp -= 1; trp->tp -= ntok; return ntok; } } if (trp->tp->type==LP) break; if (trp->tp->type!=NL) return ntok; } *narg = 0; ntok++; ntokp = ntok; trp->tp++; /* search for the terminating ), possibly extending the row */ needspace = 0; while (parens>0) { if (trp->tp >= trp->lp) gettokens(trp, 0); if (needspace) { needspace = 0; makespace(trp); } if (trp->tp->type==END) { trp->lp -= 1; trp->tp -= ntok; error(ERROR, "EOF in macro arglist"); return ntok; } if (trp->tp->type==NL) { trp->tp += 1; adjustrow(trp, -1); trp->tp -= 1; makespace(trp); needspace = 1; continue; } if (trp->tp->type==LP) parens++; else if (trp->tp->type==RP) parens--; trp->tp++; ntok++; } trp->tp -= ntok; /* Now trp->tp won't move underneath us */ lp = bp = trp->tp+ntokp; for (; parens>=0; lp++) { if (lp->type == LP) { parens++; continue; } if (lp->type==RP) parens--; if (lp->type==DSHARP) lp->type = DSHARP1; /* ## not special in arg */ if ((lp->type==COMMA && parens==0) || (parens<0 && (lp-1)->type!=LP)) { if (*narg>=NARG-1) error(FATAL, "Sorry, too many macro arguments"); ttr.bp = ttr.tp = bp; ttr.lp = lp; atr[(*narg)++] = normtokenrow(&ttr); bp = lp+1; } } return ntok; } /* * substitute the argument list into the replacement string * This would be simple except for ## and # */ void substargs(Nlist *np, Tokenrow *rtr, Tokenrow **atr) { Tokenrow tatr; Token *tp; int ntok, argno; for (rtr->tp=rtr->bp; rtr->tp<rtr->lp; ) { if (rtr->tp->type==SHARP) { /* string operator */ tp = rtr->tp; rtr->tp += 1; if ((argno = lookuparg(np, rtr->tp))<0) { error(ERROR, "# not followed by macro parameter"); continue; } ntok = 1 + (rtr->tp - tp); rtr->tp = tp; insertrow(rtr, ntok, 
stringify(atr[argno])); continue; } if (rtr->tp->type==NAME && (argno = lookuparg(np, rtr->tp)) >= 0) { if ((rtr->tp+1)->type==DSHARP || (rtr->tp!=rtr->bp && (rtr->tp-1)->type==DSHARP)) insertrow(rtr, 1, atr[argno]); else { copytokenrow(&tatr, atr[argno]); expandrow(&tatr, "<macro>"); insertrow(rtr, 1, &tatr); dofree(tatr.bp); } continue; } rtr->tp++; } } /* * Evaluate the ## operators in a tokenrow */ void doconcat(Tokenrow *trp) { Token *ltp, *ntp; Tokenrow ntr; int len; for (trp->tp=trp->bp; trp->tp<trp->lp; trp->tp++) { if (trp->tp->type==DSHARP1) trp->tp->type = DSHARP; else if (trp->tp->type==DSHARP) { char tt[128]; ltp = trp->tp-1; ntp = trp->tp+1; if (ltp<trp->bp || ntp>=trp->lp) { error(ERROR, "## occurs at border of replacement"); continue; } len = ltp->len + ntp->len; strncpy((char*)tt, (char*)ltp->t, ltp->len); strncpy((char*)tt+ltp->len, (char*)ntp->t, ntp->len); tt[len] = '\0'; setsource("<##>", -1, tt); maketokenrow(3, &ntr); gettokens(&ntr, 1); unsetsource(); if (ntr.lp-ntr.bp!=2 || ntr.bp->type==UNCLASS) error(WARNING, "Bad token %r produced by ##", &ntr); ntr.lp = ntr.bp+1; trp->tp = ltp; makespace(&ntr); insertrow(trp, (ntp-ltp)+1, &ntr); dofree(ntr.bp); trp->tp--; } } } /* * tp is a potential parameter name of macro mac; * look it up in mac's arglist, and if found, return the * corresponding index in the argname array. Return -1 if not found. 
*/ int lookuparg(Nlist *mac, Token *tp) { Token *ap; if (tp->type!=NAME || mac->ap==NULL) return -1; for (ap=mac->ap->bp; ap<mac->ap->lp; ap++) { if (ap->len==tp->len && strncmp((char*)ap->t,(char*)tp->t,ap->len)==0) return ap - mac->ap->bp; } return -1; } /* * Return a quoted version of the tokenrow (from # arg) */ #define STRLEN 512 Tokenrow * stringify(Tokenrow *vp) { static Token t = { STRING }; static Tokenrow tr = { &t, &t, &t+1, 1 }; Token *tp; uchar s[STRLEN]; uchar *sp = s, *cp; int i, instring; *sp++ = '"'; for (tp = vp->bp; tp < vp->lp; tp++) { instring = tp->type==STRING || tp->type==CCON; if (sp+2*tp->len >= &s[STRLEN-10]) { error(ERROR, "Stringified macro arg is too long"); break; } if (tp->wslen && (tp->flag&XPWS)==0) *sp++ = ' '; for (i=0, cp=tp->t; i<tp->len; i++) { if (instring && (*cp=='"' || *cp=='\\')) *sp++ = '\\'; *sp++ = *cp++; } } *sp++ = '"'; *sp = '\0'; sp = s; t.len = strlen((char*)sp); t.t = newstring(sp, t.len, 0); return &tr; } /* * expand a builtin name */ void builtin(Tokenrow *trp, int biname) { char *op; Token *tp; Source *s; tp = trp->tp; trp->tp++; /* need to find the real source */ s = cursource; while (s && s->fd==-1) s = s->next; if (s==NULL) s = cursource; /* most are strings */ tp->type = STRING; if (tp->wslen) { *outp++ = ' '; tp->wslen = 1; } op = outp; *op++ = '"'; switch (biname) { case KLINENO: tp->type = NUMBER; op = outnum(op-1, s->line); break; case KFILE: { char *src = s->filename; while ((*op++ = *src++) != 0) if (src[-1] == '\\') *op++ = '\\'; op--; break; } case KDATE: strncpy(op, curtime+4, 7); strncpy(op+7, curtime+20, 4); op += 11; break; case KTIME: strncpy(op, curtime+11, 8); op += 8; break; default: error(ERROR, "cpp botch: unknown internal macro"); return; } if (tp->type==STRING) *op++ = '"'; tp->t = (uchar*)outp; tp->len = op - outp; outp = op; }
gpl-2.0
weitengchu/linux-emcraft
drivers/net/usb/rndis_host.c
49
19049
/* * Host Side support for RNDIS Networking Links * Copyright (C) 2005 by David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/usb/rndis_host.h> /* * RNDIS is NDIS remoted over USB. It's a MSFT variant of CDC ACM ... of * course ACM was intended for modems, not Ethernet links! USB's standard * for Ethernet links is "CDC Ethernet", which is significantly simpler. * * NOTE that Microsoft's "RNDIS 1.0" specification is incomplete. Issues * include: * - Power management in particular relies on information that's scattered * through other documentation, and which is incomplete or incorrect even * there. * - There are various undocumented protocol requirements, such as the * need to send unused garbage in control-OUT messages. * - In some cases, MS-Windows will emit undocumented requests; this * matters more to peripheral implementations than host ones. * * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync". 
* * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and * currently rare) "Ethernet Emulation Model" (EEM). */ /* * RNDIS notifications from device: command completion; "reverse" * keepalives; etc */ void rndis_status(struct usbnet *dev, struct urb *urb) { devdbg(dev, "rndis status urb, len %d stat %d", urb->actual_length, urb->status); // FIXME for keepalives, respond immediately (asynchronously) // if not an RNDIS status, do like cdc_status(dev,urb) does } EXPORT_SYMBOL_GPL(rndis_status); /* * RNDIS indicate messages. */ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg, int buflen) { struct cdc_state *info = (void *)&dev->data; struct device *udev = &info->control->dev; if (dev->driver_info->indication) { dev->driver_info->indication(dev, msg, buflen); } else { switch (msg->status) { case RNDIS_STATUS_MEDIA_CONNECT: dev_info(udev, "rndis media connect\n"); break; case RNDIS_STATUS_MEDIA_DISCONNECT: dev_info(udev, "rndis media disconnect\n"); break; default: dev_info(udev, "rndis indication: 0x%08x\n", le32_to_cpu(msg->status)); } } } /* * RPC done RNDIS-style. Caller guarantees: * - message is properly byteswapped * - there's no other request pending * - buf can hold up to 1KB response (required by RNDIS spec) * On return, the first few entries are already byteswapped. * * Call context is likely probe(), before interface name is known, * which is why we won't try to use it in the diagnostics. 
*/ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) { struct cdc_state *info = (void *) &dev->data; int master_ifnum; int retval; unsigned count; __le32 rsp; u32 xid = 0, msg_len, request_id; /* REVISIT when this gets called from contexts other than probe() or * disconnect(): either serialize, or dispatch responses on xid */ /* Issue the request; xid is unique, don't bother byteswapping it */ if (likely(buf->msg_type != RNDIS_MSG_HALT && buf->msg_type != RNDIS_MSG_RESET)) { xid = dev->xid++; if (!xid) xid = dev->xid++; buf->request_id = (__force __le32) xid; } master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber; retval = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_CDC_SEND_ENCAPSULATED_COMMAND, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, master_ifnum, buf, le32_to_cpu(buf->msg_len), RNDIS_CONTROL_TIMEOUT_MS); if (unlikely(retval < 0 || xid == 0)) return retval; // FIXME Seems like some devices discard responses when // we time out and cancel our "get response" requests... // so, this is fragile. Probably need to poll for status. 
/* ignore status endpoint, just poll the control channel; * the request probably completed immediately */ rsp = buf->msg_type | RNDIS_MSG_COMPLETION; for (count = 0; count < 10; count++) { memset(buf, 0, CONTROL_BUFFER_SIZE); retval = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_CDC_GET_ENCAPSULATED_RESPONSE, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, master_ifnum, buf, buflen, RNDIS_CONTROL_TIMEOUT_MS); if (likely(retval >= 8)) { msg_len = le32_to_cpu(buf->msg_len); request_id = (__force u32) buf->request_id; if (likely(buf->msg_type == rsp)) { if (likely(request_id == xid)) { if (unlikely(rsp == RNDIS_MSG_RESET_C)) return 0; if (likely(RNDIS_STATUS_SUCCESS == buf->status)) return 0; dev_dbg(&info->control->dev, "rndis reply status %08x\n", le32_to_cpu(buf->status)); return -EL3RST; } dev_dbg(&info->control->dev, "rndis reply id %d expected %d\n", request_id, xid); /* then likely retry */ } else switch (buf->msg_type) { case RNDIS_MSG_INDICATE: /* fault/event */ rndis_msg_indicate(dev, (void *)buf, buflen); break; case RNDIS_MSG_KEEPALIVE: { /* ping */ struct rndis_keepalive_c *msg = (void *)buf; msg->msg_type = RNDIS_MSG_KEEPALIVE_C; msg->msg_len = cpu_to_le32(sizeof *msg); msg->status = RNDIS_STATUS_SUCCESS; retval = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_CDC_SEND_ENCAPSULATED_COMMAND, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, master_ifnum, msg, sizeof *msg, RNDIS_CONTROL_TIMEOUT_MS); if (unlikely(retval < 0)) dev_dbg(&info->control->dev, "rndis keepalive err %d\n", retval); } break; default: dev_dbg(&info->control->dev, "unexpected rndis msg %08x len %d\n", le32_to_cpu(buf->msg_type), msg_len); } } else { /* device probably issued a protocol stall; ignore */ dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } msleep(20); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; } EXPORT_SYMBOL_GPL(rndis_command); /* * rndis_query: * * Performs a query for @oid along 
with 0 or more bytes of payload as * specified by @in_len. If @reply_len is not set to -1 then the reply * length is checked against this value, resulting in an error if it * doesn't match. * * NOTE: Adding a payload exactly or greater than the size of the expected * response payload is an evident requirement MSFT added for ActiveSync. * * The only exception is for OIDs that return a variably sized response, * in which case no payload should be added. This undocumented (and * nonsensical!) issue was found by sniffing protocol requests from the * ActiveSync 4.1 Windows driver. */ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, void *buf, __le32 oid, u32 in_len, void **reply, int *reply_len) { int retval; union { void *buf; struct rndis_msg_hdr *header; struct rndis_query *get; struct rndis_query_c *get_c; } u; u32 off, len; u.buf = buf; memset(u.get, 0, sizeof *u.get + in_len); u.get->msg_type = RNDIS_MSG_QUERY; u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len); u.get->oid = oid; u.get->len = cpu_to_le32(in_len); u.get->offset = cpu_to_le32(20); retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n", oid, retval); return retval; } off = le32_to_cpu(u.get_c->offset); len = le32_to_cpu(u.get_c->len); if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE)) goto response_error; if (*reply_len != -1 && len != *reply_len) goto response_error; *reply = (unsigned char *) &u.get_c->request_id + off; *reply_len = len; return retval; response_error: dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) " "invalid response - off %d len %d\n", oid, off, len); return -EDOM; } /* same as usbnet_netdev_ops but MTU change not allowed */ static const struct net_device_ops rndis_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = 
eth_validate_addr, }; int generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) { int retval; struct net_device *net = dev->net; struct cdc_state *info = (void *) &dev->data; union { void *buf; struct rndis_msg_hdr *header; struct rndis_init *init; struct rndis_init_c *init_c; struct rndis_query *get; struct rndis_query_c *get_c; struct rndis_set *set; struct rndis_set_c *set_c; struct rndis_halt *halt; } u; u32 tmp; __le32 phym_unspec, *phym; int reply_len; unsigned char *bp; /* we can't rely on i/o from stack working, or stack allocation */ u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (!u.buf) return -ENOMEM; retval = usbnet_generic_cdc_bind(dev, intf); if (retval < 0) goto fail; u.init->msg_type = RNDIS_MSG_INIT; u.init->msg_len = cpu_to_le32(sizeof *u.init); u.init->major_version = cpu_to_le32(1); u.init->minor_version = cpu_to_le32(0); /* max transfer (in spec) is 0x4000 at full speed, but for * TX we'll stick to one Ethernet packet plus RNDIS framing. * For RX we handle drivers that zero-pad to end-of-packet. * Don't let userspace change these settings. * * NOTE: there still seems to be wierdness here, as if we need * to do some more things to make sure WinCE targets accept this. * They default to jumbograms of 8KB or 16KB, which is absurd * for such low data rates and which is also more than Linux * can usually expect to allocate for SKB data... 
*/ net->hard_header_len += sizeof (struct rndis_data_hdr); dev->hard_mtu = net->mtu + net->hard_header_len; dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); if (dev->maxpacket == 0) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n"); retval = -EINVAL; goto fail_and_release; } dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1); dev->rx_urb_size &= ~(dev->maxpacket - 1); u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size); net->netdev_ops = &rndis_netdev_ops; retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { /* it might not even be an RNDIS device!! */ dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); goto fail_and_release; } tmp = le32_to_cpu(u.init_c->max_transfer_size); if (tmp < dev->hard_mtu) { if (tmp <= net->hard_header_len) { dev_err(&intf->dev, "dev can't take %u byte packets (max %u)\n", dev->hard_mtu, tmp); retval = -EINVAL; goto halt_fail_and_release; } dev_warn(&intf->dev, "dev can't take %u byte packets (max %u), " "adjusting MTU to %u\n", dev->hard_mtu, tmp, tmp - net->hard_header_len); dev->hard_mtu = tmp; net->mtu = dev->hard_mtu - net->hard_header_len; } /* REVISIT: peripheral "alignment" request is ignored ... */ dev_dbg(&intf->dev, "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n", dev->hard_mtu, tmp, dev->rx_urb_size, 1 << le32_to_cpu(u.init_c->packet_alignment)); /* module has some device initialization code needs to be done right * after RNDIS_INIT */ if (dev->driver_info->early_init && dev->driver_info->early_init(dev) != 0) goto halt_fail_and_release; /* Check physical medium */ phym = NULL; reply_len = sizeof *phym; retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM, 0, (void **) &phym, &reply_len); if (retval != 0 || !phym) { /* OID is optional so don't fail here. 
*/ phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED; phym = &phym_unspec; } if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "driver requires wireless " "physical medium, but device is not.\n"); retval = -ENODEV; goto halt_fail_and_release; } if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "driver requires non-wireless " "physical medium, but device is wireless.\n"); retval = -ENODEV; goto halt_fail_and_release; } /* Get designated host ethernet address */ reply_len = ETH_ALEN; retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS, 48, (void **) &bp, &reply_len); if (unlikely(retval< 0)) { dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); goto halt_fail_and_release; } memcpy(net->dev_addr, bp, ETH_ALEN); memcpy(net->perm_addr, bp, ETH_ALEN); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); u.set->msg_type = RNDIS_MSG_SET; u.set->msg_len = cpu_to_le32(4 + sizeof *u.set); u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; u.set->len = cpu_to_le32(4); u.set->offset = cpu_to_le32((sizeof *u.set) - 8); *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER; retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { dev_err(&intf->dev, "rndis set packet filter, %d\n", retval); goto halt_fail_and_release; } retval = 0; kfree(u.buf); return retval; halt_fail_and_release: memset(u.halt, 0, sizeof *u.halt); u.halt->msg_type = RNDIS_MSG_HALT; u.halt->msg_len = cpu_to_le32(sizeof *u.halt); (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); fail_and_release: usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver_of(intf), info->data); info->data = NULL; fail: kfree(u.buf); return retval; } EXPORT_SYMBOL_GPL(generic_rndis_bind); static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) 
{ return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS); } void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) { struct rndis_halt *halt; /* try to clear any rndis state/activity (no i/o from stack!) */ halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (halt) { halt->msg_type = RNDIS_MSG_HALT; halt->msg_len = cpu_to_le32(sizeof *halt); (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE); kfree(halt); } usbnet_cdc_unbind(dev, intf); } EXPORT_SYMBOL_GPL(rndis_unbind); /* * DATA -- host must not write zlps */ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { /* peripheral may have batched packets to us... */ while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; struct sk_buff *skb2; u32 msg_len, data_offset, data_len; msg_len = le32_to_cpu(hdr->msg_len); data_offset = le32_to_cpu(hdr->data_offset); data_len = le32_to_cpu(hdr->data_len); /* don't choke if we see oob, per-packet data, etc */ if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET || skb->len < msg_len || (data_offset + data_len + 8) > msg_len)) { dev->net->stats.rx_frame_errors++; devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", le32_to_cpu(hdr->msg_type), msg_len, data_offset, data_len, skb->len); return 0; } skb_pull(skb, 8 + data_offset); /* at most one packet left? */ if (likely((data_len - skb->len) <= sizeof *hdr)) { skb_trim(skb, data_len); break; } /* try to return all the packets in the batch */ skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) break; skb_pull(skb, msg_len - sizeof *hdr); skb_trim(skb2, data_len); usbnet_skb_return(dev, skb2); } /* caller will usbnet_skb_return the remaining packet */ return 1; } EXPORT_SYMBOL_GPL(rndis_rx_fixup); struct sk_buff * rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct rndis_data_hdr *hdr; struct sk_buff *skb2; unsigned len = skb->len; if (likely(!skb_cloned(skb))) { int room = skb_headroom(skb); /* enough head room as-is? 
*/ if (unlikely((sizeof *hdr) <= room)) goto fill; /* enough room, but needs to be readjusted? */ room += skb_tailroom(skb); if (likely((sizeof *hdr) <= room)) { skb->data = memmove(skb->head + sizeof *hdr, skb->data, len); skb_set_tail_pointer(skb, len); goto fill; } } /* create a new skb, with the correct size (and tailpad) */ skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags); dev_kfree_skb_any(skb); if (unlikely(!skb2)) return skb2; skb = skb2; /* fill out the RNDIS header. we won't bother trying to batch * packets; Linux minimizes wasted bandwidth through tx queues. */ fill: hdr = (void *) __skb_push(skb, sizeof *hdr); memset(hdr, 0, sizeof *hdr); hdr->msg_type = RNDIS_MSG_PACKET; hdr->msg_len = cpu_to_le32(skb->len); hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8); hdr->data_len = cpu_to_le32(len); /* FIXME make the last packet always be short ... */ return skb; } EXPORT_SYMBOL_GPL(rndis_tx_fixup); static const struct driver_info rndis_info = { .description = "RNDIS device", .flags = FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT, .bind = rndis_bind, .unbind = rndis_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, }; /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_info, }, { /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), .driver_info = (unsigned long) &rndis_info, }, { /* RNDIS for tethering */ USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver rndis_driver = { .name = "rndis_host", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = 
usbnet_resume, }; static int __init rndis_init(void) { return usb_register(&rndis_driver); } module_init(rndis_init); static void __exit rndis_exit(void) { usb_deregister(&rndis_driver); } module_exit(rndis_exit); MODULE_AUTHOR("David Brownell"); MODULE_DESCRIPTION("USB Host side RNDIS driver"); MODULE_LICENSE("GPL");
gpl-2.0
nmacs/linux-2.6.34.14-atlas
drivers/net/tulip/xircom_cb.c
49
32505
/* * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards * * This software is (C) by the respective authors, and licensed under the GPL * License. * * Written by Arjan van de Ven for Red Hat, Inc. * Based on work by Jeff Garzik, Doug Ledford and Donald Becker * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/io.h> #ifdef CONFIG_NET_POLL_CONTROLLER #include <asm/irq.h> #endif #ifdef DEBUG #define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) #define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__) #else #define enter(x) do {} while (0) #define leave(x) do {} while (0) #endif MODULE_DESCRIPTION("Xircom Cardbus ethernet driver"); MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); MODULE_LICENSE("GPL"); /* IO registers on the card, offsets */ #define CSR0 0x00 #define CSR1 0x08 #define CSR2 0x10 #define CSR3 0x18 #define CSR4 0x20 #define CSR5 0x28 #define CSR6 0x30 #define CSR7 0x38 #define CSR8 0x40 #define CSR9 0x48 #define CSR10 0x50 #define CSR11 0x58 #define CSR12 0x60 #define CSR13 0x68 #define CSR14 0x70 #define CSR15 0x78 #define CSR16 0x80 /* PCI registers */ #define PCI_POWERMGMT 0x40 /* Offsets of the buffers within the descriptor pages, in bytes */ #define NUMDESCRIPTORS 4 static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144}; struct xircom_private { /* Send and receive 
buffers, kernel-addressable and dma addressable forms */ __le32 *rx_buffer; __le32 *tx_buffer; dma_addr_t rx_dma_handle; dma_addr_t tx_dma_handle; struct sk_buff *tx_skb[4]; unsigned long io_port; int open; /* transmit_used is the rotating counter that indicates which transmit descriptor has to be used next */ int transmit_used; /* Spinlock to serialize register operations. It must be helt while manipulating the following registers: CSR0, CSR6, CSR7, CSR9, CSR10, CSR15 */ spinlock_t lock; struct pci_dev *pdev; struct net_device *dev; }; /* Function prototypes */ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void xircom_remove(struct pci_dev *pdev); static irqreturn_t xircom_interrupt(int irq, void *dev_instance); static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, struct net_device *dev); static int xircom_open(struct net_device *dev); static int xircom_close(struct net_device *dev); static void xircom_up(struct xircom_private *card); #ifdef CONFIG_NET_POLL_CONTROLLER static void xircom_poll_controller(struct net_device *dev); #endif static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset); static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset); static void read_mac_address(struct xircom_private *card); static void transceiver_voodoo(struct xircom_private *card); static void initialize_card(struct xircom_private *card); static void trigger_transmit(struct xircom_private *card); static void trigger_receive(struct xircom_private *card); static void setup_descriptors(struct xircom_private *card); static void remove_descriptors(struct xircom_private *card); static int link_status_changed(struct xircom_private *card); static void activate_receiver(struct xircom_private *card); static void deactivate_receiver(struct xircom_private *card); static void 
activate_transmitter(struct xircom_private *card); static void deactivate_transmitter(struct xircom_private *card); static void enable_transmit_interrupt(struct xircom_private *card); static void enable_receive_interrupt(struct xircom_private *card); static void enable_link_interrupt(struct xircom_private *card); static void disable_all_interrupts(struct xircom_private *card); static int link_status(struct xircom_private *card); static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, {0,}, }; MODULE_DEVICE_TABLE(pci, xircom_pci_table); static struct pci_driver xircom_ops = { .name = "xircom_cb", .id_table = xircom_pci_table, .probe = xircom_probe, .remove = xircom_remove, .suspend =NULL, .resume =NULL }; #ifdef DEBUG static void print_binary(unsigned int number) { int i,i2; char buffer[64]; memset(buffer,0,64); i2=0; for (i=31;i>=0;i--) { if (number & (1<<i)) buffer[i2++]='1'; else buffer[i2++]='0'; if ((i&3)==0) buffer[i2++]=' '; } printk("%s\n",buffer); } #endif static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct xircom_private *private = netdev_priv(dev); strcpy(info->driver, "xircom_cb"); strcpy(info->bus_info, pci_name(private->pdev)); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; static const struct net_device_ops netdev_ops = { .ndo_open = xircom_open, .ndo_stop = xircom_close, .ndo_start_xmit = xircom_start_xmit, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xircom_poll_controller, #endif }; /* xircom_probe is the code that gets called on device insertion. it sets up the hardware and registers the device to the networklayer. TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the first two packets that get send, and pump hates that. 
*/ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct net_device *dev = NULL; struct xircom_private *private; unsigned long flags; unsigned short tmp16; enter("xircom_probe"); /* First do the PCI initialisation */ if (pci_enable_device(pdev)) return -ENODEV; /* disable all powermanagement */ pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ /* clear PCI status, if any */ pci_read_config_word (pdev,PCI_STATUS, &tmp16); pci_write_config_word (pdev, PCI_STATUS,tmp16); if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { pr_err("%s: failed to allocate io-region\n", __func__); return -ENODEV; } /* Before changing the hardware, allocate the memory. This way, we can fail gracefully if not enough memory is available. */ dev = alloc_etherdev(sizeof(struct xircom_private)); if (!dev) { pr_err("%s: failed to allocate etherdev\n", __func__); goto device_fail; } private = netdev_priv(dev); /* Allocate the send/receive buffers */ private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); if (private->rx_buffer == NULL) { pr_err("%s: no memory for rx buffer\n", __func__); goto rx_buf_fail; } private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); if (private->tx_buffer == NULL) { pr_err("%s: no memory for tx buffer\n", __func__); goto tx_buf_fail; } SET_NETDEV_DEV(dev, &pdev->dev); private->dev = dev; private->pdev = pdev; private->io_port = pci_resource_start(pdev, 0); spin_lock_init(&private->lock); dev->irq = pdev->irq; dev->base_addr = private->io_port; initialize_card(private); read_mac_address(private); setup_descriptors(private); dev->netdev_ops = &netdev_ops; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); pci_set_drvdata(pdev, dev); if (register_netdev(dev)) { pr_err("%s: netdevice registration failed\n", __func__); goto reg_fail; } dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n", 
pdev->revision, pdev->irq); /* start the transmitter to get a heartbeat */ /* TODO: send 2 dummy packets here */ transceiver_voodoo(private); spin_lock_irqsave(&private->lock,flags); activate_transmitter(private); activate_receiver(private); spin_unlock_irqrestore(&private->lock,flags); trigger_receive(private); leave("xircom_probe"); return 0; reg_fail: kfree(private->tx_buffer); tx_buf_fail: kfree(private->rx_buffer); rx_buf_fail: free_netdev(dev); device_fail: return -ENODEV; } /* xircom_remove is called on module-unload or on device-eject. it unregisters the irq, io-region and network device. Interrupts and such are already stopped in the "ifconfig ethX down" code. */ static void __devexit xircom_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct xircom_private *card = netdev_priv(dev); enter("xircom_remove"); pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle); pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle); release_region(dev->base_addr, 128); unregister_netdev(dev); free_netdev(dev); pci_set_drvdata(pdev, NULL); leave("xircom_remove"); } static irqreturn_t xircom_interrupt(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *) dev_instance; struct xircom_private *card = netdev_priv(dev); unsigned int status; int i; enter("xircom_interrupt\n"); spin_lock(&card->lock); status = inl(card->io_port+CSR5); #ifdef DEBUG print_binary(status); printk("tx status 0x%08x 0x%08x \n", card->tx_buffer[0], card->tx_buffer[4]); printk("rx status 0x%08x 0x%08x \n", card->rx_buffer[0], card->rx_buffer[4]); #endif /* Handle shared irq and hotplug */ if (status == 0 || status == 0xffffffff) { spin_unlock(&card->lock); return IRQ_NONE; } if (link_status_changed(card)) { int newlink; printk(KERN_DEBUG "xircom_cb: Link status has changed\n"); newlink = link_status(card); dev_info(&dev->dev, "Link is %i mbit\n", newlink); if (newlink) netif_carrier_on(dev); else netif_carrier_off(dev); } /* 
Clear all remaining interrupts */ status |= 0xffffffff; /* FIXME: make this clear only the real existing bits */ outl(status,card->io_port+CSR5); for (i=0;i<NUMDESCRIPTORS;i++) investigate_write_descriptor(dev,card,i,bufferoffsets[i]); for (i=0;i<NUMDESCRIPTORS;i++) investigate_read_descriptor(dev,card,i,bufferoffsets[i]); spin_unlock(&card->lock); leave("xircom_interrupt"); return IRQ_HANDLED; } static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xircom_private *card; unsigned long flags; int nextdescriptor; int desc; enter("xircom_start_xmit"); card = netdev_priv(dev); spin_lock_irqsave(&card->lock,flags); /* First see if we can free some descriptors */ for (desc=0;desc<NUMDESCRIPTORS;desc++) investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]); nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS); desc = card->transmit_used; /* only send the packet if the descriptor is free */ if (card->tx_buffer[4*desc]==0) { /* Copy the packet data; zero the memory first as the card sometimes sends more than you ask it to. */ memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); skb_copy_from_linear_data(skb, &(card->tx_buffer[bufferoffsets[desc] / 4]), skb->len); /* FIXME: The specification tells us that the length we send HAS to be a multiple of 4 bytes. */ card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len); if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */ card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25); card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000); /* 0xF0... means want interrupts*/ card->tx_skb[desc] = skb; wmb(); /* This gives the descriptor to the card */ card->tx_buffer[4*desc] = cpu_to_le32(0x80000000); trigger_transmit(card); if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x8000000)) { /* next descriptor is occupied... 
*/ netif_stop_queue(dev); } card->transmit_used = nextdescriptor; leave("xircom-start_xmit - sent"); spin_unlock_irqrestore(&card->lock,flags); return NETDEV_TX_OK; } /* Uh oh... no free descriptor... drop the packet */ netif_stop_queue(dev); spin_unlock_irqrestore(&card->lock,flags); trigger_transmit(card); return NETDEV_TX_BUSY; } static int xircom_open(struct net_device *dev) { struct xircom_private *xp = netdev_priv(dev); int retval; enter("xircom_open"); pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n", dev->name, dev->irq); retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); if (retval) { leave("xircom_open - No IRQ"); return retval; } xircom_up(xp); xp->open = 1; leave("xircom_open"); return 0; } static int xircom_close(struct net_device *dev) { struct xircom_private *card; unsigned long flags; enter("xircom_close"); card = netdev_priv(dev); netif_stop_queue(dev); /* we don't want new packets */ spin_lock_irqsave(&card->lock,flags); disable_all_interrupts(card); #if 0 /* We can enable this again once we send dummy packets on ifconfig ethX up */ deactivate_receiver(card); deactivate_transmitter(card); #endif remove_descriptors(card); spin_unlock_irqrestore(&card->lock,flags); card->open = 0; free_irq(dev->irq,dev); leave("xircom_close"); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xircom_poll_controller(struct net_device *dev) { disable_irq(dev->irq); xircom_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static void initialize_card(struct xircom_private *card) { unsigned int val; unsigned long flags; enter("initialize_card"); spin_lock_irqsave(&card->lock, flags); /* First: reset the card */ val = inl(card->io_port + CSR0); val |= 0x01; /* Software reset */ outl(val, card->io_port + CSR0); udelay(100); /* give the card some time to reset */ val = inl(card->io_port + CSR0); val &= ~0x01; /* disable Software reset */ outl(val, card->io_port + CSR0); val = 0; /* Value 0x00 is a safe and 
conservative value for the PCI configuration settings */ outl(val, card->io_port + CSR0); disable_all_interrupts(card); deactivate_receiver(card); deactivate_transmitter(card); spin_unlock_irqrestore(&card->lock, flags); leave("initialize_card"); } /* trigger_transmit causes the card to check for frames to be transmitted. This is accomplished by writing to the CSR1 port. The documentation claims that the act of writing is sufficient and that the value is ignored; I chose zero. */ static void trigger_transmit(struct xircom_private *card) { unsigned int val; enter("trigger_transmit"); val = 0; outl(val, card->io_port + CSR1); leave("trigger_transmit"); } /* trigger_receive causes the card to check for empty frames in the descriptor list in which packets can be received. This is accomplished by writing to the CSR2 port. The documentation claims that the act of writing is sufficient and that the value is ignored; I chose zero. */ static void trigger_receive(struct xircom_private *card) { unsigned int val; enter("trigger_receive"); val = 0; outl(val, card->io_port + CSR2); leave("trigger_receive"); } /* setup_descriptors initializes the send and receive buffers to be valid descriptors and programs the addresses into the card. 
*/ static void setup_descriptors(struct xircom_private *card) { u32 address; int i; enter("setup_descriptors"); BUG_ON(card->rx_buffer == NULL); BUG_ON(card->tx_buffer == NULL); /* Receive descriptors */ memset(card->rx_buffer, 0, 128); /* clear the descriptors */ for (i=0;i<NUMDESCRIPTORS;i++ ) { /* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */ card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000); /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ card->rx_buffer[i*4 + 1] = cpu_to_le32(1536); if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); /* Rx Descr2: address of the buffer we store the buffer at the 2nd half of the page */ address = card->rx_dma_handle; card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); /* Rx Desc3: address of 2nd buffer -> 0 */ card->rx_buffer[i*4 + 3] = 0; } wmb(); /* Write the receive descriptor ring address to the card */ address = card->rx_dma_handle; outl(address, card->io_port + CSR3); /* Receive descr list address */ /* transmit descriptors */ memset(card->tx_buffer, 0, 128); /* clear the descriptors */ for (i=0;i<NUMDESCRIPTORS;i++ ) { /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */ card->tx_buffer[i*4 + 0] = 0x00000000; /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ card->tx_buffer[i*4 + 1] = cpu_to_le32(1536); if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); /* Tx Descr2: address of the buffer we store the buffer at the 2nd half of the page */ address = card->tx_dma_handle; card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); /* Tx Desc3: address of 2nd buffer -> 0 */ card->tx_buffer[i*4 + 3] = 0; } wmb(); /* wite the transmit descriptor ring to the card */ address = card->tx_dma_handle; outl(address, card->io_port + CSR4); /* xmit descr list address */ leave("setup_descriptors"); } /* remove_descriptors informs the 
card the descriptors are no longer valid by setting the address in the card to 0x00. */ static void remove_descriptors(struct xircom_private *card) { unsigned int val; enter("remove_descriptors"); val = 0; outl(val, card->io_port + CSR3); /* Receive descriptor address */ outl(val, card->io_port + CSR4); /* Send descriptor address */ leave("remove_descriptors"); } /* link_status_changed returns 1 if the card has indicated that the link status has changed. The new link status has to be read from CSR12. This function also clears the status-bit. */ static int link_status_changed(struct xircom_private *card) { unsigned int val; enter("link_status_changed"); val = inl(card->io_port + CSR5); /* Status register */ if ((val & (1 << 27)) == 0) { /* no change */ leave("link_status_changed - nochange"); return 0; } /* clear the event by writing a 1 to the bit in the status register. */ val = (1 << 27); outl(val, card->io_port + CSR5); leave("link_status_changed - changed"); return 1; } /* transmit_active returns 1 if the transmitter on the card is in a non-stopped state. */ static int transmit_active(struct xircom_private *card) { unsigned int val; enter("transmit_active"); val = inl(card->io_port + CSR5); /* Status register */ if ((val & (7 << 20)) == 0) { /* transmitter disabled */ leave("transmit_active - inactive"); return 0; } leave("transmit_active - active"); return 1; } /* receive_active returns 1 if the receiver on the card is in a non-stopped state. */ static int receive_active(struct xircom_private *card) { unsigned int val; enter("receive_active"); val = inl(card->io_port + CSR5); /* Status register */ if ((val & (7 << 17)) == 0) { /* receiver disabled */ leave("receive_active - inactive"); return 0; } leave("receive_active - active"); return 1; } /* activate_receiver enables the receiver on the card. Before being allowed to active the receiver, the receiver must be completely de-activated. 
To achieve this, this code actually disables the receiver first; then it waits for the receiver to become inactive, then it activates the receiver and then it waits for the receiver to be active. must be called with the lock held and interrupts disabled. */ static void activate_receiver(struct xircom_private *card) { unsigned int val; int counter; enter("activate_receiver"); val = inl(card->io_port + CSR6); /* Operation mode */ /* If the "active" bit is set and the receiver is already active, no need to do the expensive thing */ if ((val&2) && (receive_active(card))) return; val = val & ~2; /* disable the receiver */ outl(val, card->io_port + CSR6); counter = 10; while (counter > 0) { if (!receive_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) pr_err("Receiver failed to deactivate\n"); } /* enable the receiver */ val = inl(card->io_port + CSR6); /* Operation mode */ val = val | 2; /* enable the receiver */ outl(val, card->io_port + CSR6); /* now wait for the card to activate again */ counter = 10; while (counter > 0) { if (receive_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) pr_err("Receiver failed to re-activate\n"); } leave("activate_receiver"); } /* deactivate_receiver disables the receiver on the card. To achieve this this code disables the receiver first; then it waits for the receiver to become inactive. must be called with the lock held and interrupts disabled. */ static void deactivate_receiver(struct xircom_private *card) { unsigned int val; int counter; enter("deactivate_receiver"); val = inl(card->io_port + CSR6); /* Operation mode */ val = val & ~2; /* disable the receiver */ outl(val, card->io_port + CSR6); counter = 10; while (counter > 0) { if (!receive_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) pr_err("Receiver failed to deactivate\n"); } leave("deactivate_receiver"); } /* activate_transmitter enables the transmitter on the card. 
Before being allowed to active the transmitter, the transmitter must be completely de-activated. To achieve this, this code actually disables the transmitter first; then it waits for the transmitter to become inactive, then it activates the transmitter and then it waits for the transmitter to be active again. must be called with the lock held and interrupts disabled. */ static void activate_transmitter(struct xircom_private *card) { unsigned int val; int counter; enter("activate_transmitter"); val = inl(card->io_port + CSR6); /* Operation mode */ /* If the "active" bit is set and the receiver is already active, no need to do the expensive thing */ if ((val&(1<<13)) && (transmit_active(card))) return; val = val & ~(1 << 13); /* disable the transmitter */ outl(val, card->io_port + CSR6); counter = 10; while (counter > 0) { if (!transmit_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) pr_err("Transmitter failed to deactivate\n"); } /* enable the transmitter */ val = inl(card->io_port + CSR6); /* Operation mode */ val = val | (1 << 13); /* enable the transmitter */ outl(val, card->io_port + CSR6); /* now wait for the card to activate again */ counter = 10; while (counter > 0) { if (transmit_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) pr_err("Transmitter failed to re-activate\n"); } leave("activate_transmitter"); } /* deactivate_transmitter disables the transmitter on the card. To achieve this this code disables the transmitter first; then it waits for the transmitter to become inactive. must be called with the lock held and interrupts disabled. 
*/ static void deactivate_transmitter(struct xircom_private *card) { unsigned int val; int counter; enter("deactivate_transmitter"); val = inl(card->io_port + CSR6); /* Operation mode */ val = val & ~2; /* disable the transmitter */ outl(val, card->io_port + CSR6); counter = 20; while (counter > 0) { if (!transmit_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) pr_err("Transmitter failed to deactivate\n"); } leave("deactivate_transmitter"); } /* enable_transmit_interrupt enables the transmit interrupt must be called with the lock held and interrupts disabled. */ static void enable_transmit_interrupt(struct xircom_private *card) { unsigned int val; enter("enable_transmit_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val |= 1; /* enable the transmit interrupt */ outl(val, card->io_port + CSR7); leave("enable_transmit_interrupt"); } /* enable_receive_interrupt enables the receive interrupt must be called with the lock held and interrupts disabled. */ static void enable_receive_interrupt(struct xircom_private *card) { unsigned int val; enter("enable_receive_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val = val | (1 << 6); /* enable the receive interrupt */ outl(val, card->io_port + CSR7); leave("enable_receive_interrupt"); } /* enable_link_interrupt enables the link status change interrupt must be called with the lock held and interrupts disabled. */ static void enable_link_interrupt(struct xircom_private *card) { unsigned int val; enter("enable_link_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val = val | (1 << 27); /* enable the link status chage interrupt */ outl(val, card->io_port + CSR7); leave("enable_link_interrupt"); } /* disable_all_interrupts disables all interrupts must be called with the lock held and interrupts disabled. 
*/ static void disable_all_interrupts(struct xircom_private *card) { unsigned int val; enter("enable_all_interrupts"); val = 0; /* disable all interrupts */ outl(val, card->io_port + CSR7); leave("disable_all_interrupts"); } /* enable_common_interrupts enables several weird interrupts must be called with the lock held and interrupts disabled. */ static void enable_common_interrupts(struct xircom_private *card) { unsigned int val; enter("enable_link_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val |= (1<<16); /* Normal Interrupt Summary */ val |= (1<<15); /* Abnormal Interrupt Summary */ val |= (1<<13); /* Fatal bus error */ val |= (1<<8); /* Receive Process Stopped */ val |= (1<<7); /* Receive Buffer Unavailable */ val |= (1<<5); /* Transmit Underflow */ val |= (1<<2); /* Transmit Buffer Unavailable */ val |= (1<<1); /* Transmit Process Stopped */ outl(val, card->io_port + CSR7); leave("enable_link_interrupt"); } /* enable_promisc starts promisc mode must be called with the lock held and interrupts disabled. */ static int enable_promisc(struct xircom_private *card) { unsigned int val; enter("enable_promisc"); val = inl(card->io_port + CSR6); val = val | (1 << 6); outl(val, card->io_port + CSR6); leave("enable_promisc"); return 1; } /* link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. Must be called in locked state with interrupts disabled */ static int link_status(struct xircom_private *card) { unsigned int val; enter("link_status"); val = inb(card->io_port + CSR12); if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ return 10; if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ return 100; /* If we get here -> no link at all */ leave("link_status"); return 0; } /* read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. 
This function will take the spinlock itself and can, as a result, not be called with the lock helt. */ static void read_mac_address(struct xircom_private *card) { unsigned char j, tuple, link, data_id, data_count; unsigned long flags; int i; enter("read_mac_address"); spin_lock_irqsave(&card->lock, flags); outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ for (i = 0x100; i < 0x1f7; i += link + 2) { outl(i, card->io_port + CSR10); tuple = inl(card->io_port + CSR9) & 0xff; outl(i + 1, card->io_port + CSR10); link = inl(card->io_port + CSR9) & 0xff; outl(i + 2, card->io_port + CSR10); data_id = inl(card->io_port + CSR9) & 0xff; outl(i + 3, card->io_port + CSR10); data_count = inl(card->io_port + CSR9) & 0xff; if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { /* * This is it. We have the data we want. */ for (j = 0; j < 6; j++) { outl(i + j + 4, card->io_port + CSR10); card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; } break; } else if (link == 0) { break; } } spin_unlock_irqrestore(&card->lock, flags); pr_debug(" %pM\n", card->dev->dev_addr); leave("read_mac_address"); } /* transceiver_voodoo() enables the external UTP plug thingy. it's called voodoo as I stole this code and cannot cross-reference it with the specification. 
*/ static void transceiver_voodoo(struct xircom_private *card) { unsigned long flags; enter("transceiver_voodoo"); /* disable all powermanagement */ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); setup_descriptors(card); spin_lock_irqsave(&card->lock, flags); outl(0x0008, card->io_port + CSR15); udelay(25); outl(0xa8050000, card->io_port + CSR15); udelay(25); outl(0xa00f0000, card->io_port + CSR15); udelay(25); spin_unlock_irqrestore(&card->lock, flags); netif_start_queue(card->dev); leave("transceiver_voodoo"); } static void xircom_up(struct xircom_private *card) { unsigned long flags; int i; enter("xircom_up"); /* disable all powermanagement */ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); setup_descriptors(card); spin_lock_irqsave(&card->lock, flags); enable_link_interrupt(card); enable_transmit_interrupt(card); enable_receive_interrupt(card); enable_common_interrupts(card); enable_promisc(card); /* The card can have received packets already, read them away now */ for (i=0;i<NUMDESCRIPTORS;i++) investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]); spin_unlock_irqrestore(&card->lock, flags); trigger_receive(card); trigger_transmit(card); netif_start_queue(card->dev); leave("xircom_up"); } /* Bufferoffset is in BYTES */ static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset) { int status; enter("investigate_read_descriptor"); status = le32_to_cpu(card->rx_buffer[4*descnr]); if ((status > 0)) { /* packet received */ /* TODO: discard error packets */ short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */ struct sk_buff *skb; if (pkt_len > 1518) { pr_err("Packet length %i is bogus\n", pkt_len); pkt_len = 1518; } skb = dev_alloc_skb(pkt_len + 2); if (skb == NULL) { dev->stats.rx_dropped++; goto out; } skb_reserve(skb, 2); skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len); skb_put(skb, 
pkt_len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; out: /* give the buffer back to the card */ card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000); trigger_receive(card); } leave("investigate_read_descriptor"); } /* Bufferoffset is in BYTES */ static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset) { int status; enter("investigate_write_descriptor"); status = le32_to_cpu(card->tx_buffer[4*descnr]); #if 0 if (status & 0x8000) { /* Major error */ pr_err("Major transmit error status %x\n", status); card->tx_buffer[4*descnr] = 0; netif_wake_queue (dev); } #endif if (status > 0) { /* bit 31 is 0 when done */ if (card->tx_skb[descnr]!=NULL) { dev->stats.tx_bytes += card->tx_skb[descnr]->len; dev_kfree_skb_irq(card->tx_skb[descnr]); } card->tx_skb[descnr] = NULL; /* Bit 8 in the status field is 1 if there was a collision */ if (status&(1<<8)) dev->stats.collisions++; card->tx_buffer[4*descnr] = 0; /* descriptor is free again */ netif_wake_queue (dev); dev->stats.tx_packets++; } leave("investigate_write_descriptor"); } static int __init xircom_init(void) { return pci_register_driver(&xircom_ops); } static void __exit xircom_exit(void) { pci_unregister_driver(&xircom_ops); } module_init(xircom_init) module_exit(xircom_exit)
gpl-2.0
rockly703/original-linux-2.6.28
drivers/net/tulip/xircom_cb.c
49
32805
/* * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards * * This software is (C) by the respective authors, and licensed under the GPL * License. * * Written by Arjan van de Ven for Red Hat, Inc. * Based on work by Jeff Garzik, Doug Ledford and Donald Becker * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $ */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/io.h> #ifdef CONFIG_NET_POLL_CONTROLLER #include <asm/irq.h> #endif #ifdef DEBUG #define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) #define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__) #else #define enter(x) do {} while (0) #define leave(x) do {} while (0) #endif MODULE_DESCRIPTION("Xircom Cardbus ethernet driver"); MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); MODULE_LICENSE("GPL"); /* IO registers on the card, offsets */ #define CSR0 0x00 #define CSR1 0x08 #define CSR2 0x10 #define CSR3 0x18 #define CSR4 0x20 #define CSR5 0x28 #define CSR6 0x30 #define CSR7 0x38 #define CSR8 0x40 #define CSR9 0x48 #define CSR10 0x50 #define CSR11 0x58 #define CSR12 0x60 #define CSR13 0x68 #define CSR14 0x70 #define CSR15 0x78 #define CSR16 0x80 /* PCI registers */ #define PCI_POWERMGMT 0x40 /* Offsets of the buffers within the descriptor pages, in bytes */ #define NUMDESCRIPTORS 4 static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144}; struct xircom_private { /* Send and receive buffers, kernel-addressable and dma addressable 
forms */ __le32 *rx_buffer; __le32 *tx_buffer; dma_addr_t rx_dma_handle; dma_addr_t tx_dma_handle; struct sk_buff *tx_skb[4]; unsigned long io_port; int open; /* transmit_used is the rotating counter that indicates which transmit descriptor has to be used next */ int transmit_used; /* Spinlock to serialize register operations. It must be helt while manipulating the following registers: CSR0, CSR6, CSR7, CSR9, CSR10, CSR15 */ spinlock_t lock; struct pci_dev *pdev; struct net_device *dev; struct net_device_stats stats; }; /* Function prototypes */ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void xircom_remove(struct pci_dev *pdev); static irqreturn_t xircom_interrupt(int irq, void *dev_instance); static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev); static int xircom_open(struct net_device *dev); static int xircom_close(struct net_device *dev); static void xircom_up(struct xircom_private *card); static struct net_device_stats *xircom_get_stats(struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER static void xircom_poll_controller(struct net_device *dev); #endif static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset); static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset); static void read_mac_address(struct xircom_private *card); static void transceiver_voodoo(struct xircom_private *card); static void initialize_card(struct xircom_private *card); static void trigger_transmit(struct xircom_private *card); static void trigger_receive(struct xircom_private *card); static void setup_descriptors(struct xircom_private *card); static void remove_descriptors(struct xircom_private *card); static int link_status_changed(struct xircom_private *card); static void activate_receiver(struct xircom_private *card); static void deactivate_receiver(struct 
xircom_private *card); static void activate_transmitter(struct xircom_private *card); static void deactivate_transmitter(struct xircom_private *card); static void enable_transmit_interrupt(struct xircom_private *card); static void enable_receive_interrupt(struct xircom_private *card); static void enable_link_interrupt(struct xircom_private *card); static void disable_all_interrupts(struct xircom_private *card); static int link_status(struct xircom_private *card); static struct pci_device_id xircom_pci_table[] = { {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, {0,}, }; MODULE_DEVICE_TABLE(pci, xircom_pci_table); static struct pci_driver xircom_ops = { .name = "xircom_cb", .id_table = xircom_pci_table, .probe = xircom_probe, .remove = xircom_remove, .suspend =NULL, .resume =NULL }; #ifdef DEBUG static void print_binary(unsigned int number) { int i,i2; char buffer[64]; memset(buffer,0,64); i2=0; for (i=31;i>=0;i--) { if (number & (1<<i)) buffer[i2++]='1'; else buffer[i2++]='0'; if ((i&3)==0) buffer[i2++]=' '; } printk("%s\n",buffer); } #endif static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct xircom_private *private = netdev_priv(dev); strcpy(info->driver, "xircom_cb"); strcpy(info->bus_info, pci_name(private->pdev)); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; /* xircom_probe is the code that gets called on device insertion. it sets up the hardware and registers the device to the networklayer. TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the first two packets that get send, and pump hates that. 
*/ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct net_device *dev = NULL; struct xircom_private *private; unsigned long flags; unsigned short tmp16; enter("xircom_probe"); /* First do the PCI initialisation */ if (pci_enable_device(pdev)) return -ENODEV; /* disable all powermanagement */ pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ /* clear PCI status, if any */ pci_read_config_word (pdev,PCI_STATUS, &tmp16); pci_write_config_word (pdev, PCI_STATUS,tmp16); if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); return -ENODEV; } /* Before changing the hardware, allocate the memory. This way, we can fail gracefully if not enough memory is available. */ dev = alloc_etherdev(sizeof(struct xircom_private)); if (!dev) { printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n"); goto device_fail; } private = netdev_priv(dev); /* Allocate the send/receive buffers */ private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); if (private->rx_buffer == NULL) { printk(KERN_ERR "xircom_probe: no memory for rx buffer \n"); goto rx_buf_fail; } private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); if (private->tx_buffer == NULL) { printk(KERN_ERR "xircom_probe: no memory for tx buffer \n"); goto tx_buf_fail; } SET_NETDEV_DEV(dev, &pdev->dev); private->dev = dev; private->pdev = pdev; private->io_port = pci_resource_start(pdev, 0); spin_lock_init(&private->lock); dev->irq = pdev->irq; dev->base_addr = private->io_port; initialize_card(private); read_mac_address(private); setup_descriptors(private); dev->open = &xircom_open; dev->hard_start_xmit = &xircom_start_xmit; dev->stop = &xircom_close; dev->get_stats = &xircom_get_stats; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &xircom_poll_controller; #endif 
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); pci_set_drvdata(pdev, dev); if (register_netdev(dev)) { printk(KERN_ERR "xircom_probe: netdevice registration failed.\n"); goto reg_fail; } printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq); /* start the transmitter to get a heartbeat */ /* TODO: send 2 dummy packets here */ transceiver_voodoo(private); spin_lock_irqsave(&private->lock,flags); activate_transmitter(private); activate_receiver(private); spin_unlock_irqrestore(&private->lock,flags); trigger_receive(private); leave("xircom_probe"); return 0; reg_fail: kfree(private->tx_buffer); tx_buf_fail: kfree(private->rx_buffer); rx_buf_fail: free_netdev(dev); device_fail: return -ENODEV; } /* xircom_remove is called on module-unload or on device-eject. it unregisters the irq, io-region and network device. Interrupts and such are already stopped in the "ifconfig ethX down" code. */ static void __devexit xircom_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct xircom_private *card = netdev_priv(dev); enter("xircom_remove"); pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle); pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle); release_region(dev->base_addr, 128); unregister_netdev(dev); free_netdev(dev); pci_set_drvdata(pdev, NULL); leave("xircom_remove"); } static irqreturn_t xircom_interrupt(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *) dev_instance; struct xircom_private *card = netdev_priv(dev); unsigned int status; int i; enter("xircom_interrupt\n"); spin_lock(&card->lock); status = inl(card->io_port+CSR5); #ifdef DEBUG print_binary(status); printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]); printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]); #endif /* Handle shared irq and hotplug */ if (status == 0 || status == 0xffffffff) { spin_unlock(&card->lock); return IRQ_NONE; } 
if (link_status_changed(card)) { int newlink; printk(KERN_DEBUG "xircom_cb: Link status has changed \n"); newlink = link_status(card); printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink); if (newlink) netif_carrier_on(dev); else netif_carrier_off(dev); } /* Clear all remaining interrupts */ status |= 0xffffffff; /* FIXME: make this clear only the real existing bits */ outl(status,card->io_port+CSR5); for (i=0;i<NUMDESCRIPTORS;i++) investigate_write_descriptor(dev,card,i,bufferoffsets[i]); for (i=0;i<NUMDESCRIPTORS;i++) investigate_read_descriptor(dev,card,i,bufferoffsets[i]); spin_unlock(&card->lock); leave("xircom_interrupt"); return IRQ_HANDLED; } static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xircom_private *card; unsigned long flags; int nextdescriptor; int desc; enter("xircom_start_xmit"); card = netdev_priv(dev); spin_lock_irqsave(&card->lock,flags); /* First see if we can free some descriptors */ for (desc=0;desc<NUMDESCRIPTORS;desc++) investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]); nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS); desc = card->transmit_used; /* only send the packet if the descriptor is free */ if (card->tx_buffer[4*desc]==0) { /* Copy the packet data; zero the memory first as the card sometimes sends more than you ask it to. */ memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); skb_copy_from_linear_data(skb, &(card->tx_buffer[bufferoffsets[desc] / 4]), skb->len); /* FIXME: The specification tells us that the length we send HAS to be a multiple of 4 bytes. */ card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len); if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */ card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25); card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000); /* 0xF0... 
means want interrupts*/ card->tx_skb[desc] = skb; wmb(); /* This gives the descriptor to the card */ card->tx_buffer[4*desc] = cpu_to_le32(0x80000000); trigger_transmit(card); if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x8000000)) { /* next descriptor is occupied... */ netif_stop_queue(dev); } card->transmit_used = nextdescriptor; leave("xircom-start_xmit - sent"); spin_unlock_irqrestore(&card->lock,flags); return 0; } /* Uh oh... no free descriptor... drop the packet */ netif_stop_queue(dev); spin_unlock_irqrestore(&card->lock,flags); trigger_transmit(card); return NETDEV_TX_BUSY; } static int xircom_open(struct net_device *dev) { struct xircom_private *xp = netdev_priv(dev); int retval; enter("xircom_open"); printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq); retval = request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev); if (retval) { leave("xircom_open - No IRQ"); return retval; } xircom_up(xp); xp->open = 1; leave("xircom_open"); return 0; } static int xircom_close(struct net_device *dev) { struct xircom_private *card; unsigned long flags; enter("xircom_close"); card = netdev_priv(dev); netif_stop_queue(dev); /* we don't want new packets */ spin_lock_irqsave(&card->lock,flags); disable_all_interrupts(card); #if 0 /* We can enable this again once we send dummy packets on ifconfig ethX up */ deactivate_receiver(card); deactivate_transmitter(card); #endif remove_descriptors(card); spin_unlock_irqrestore(&card->lock,flags); card->open = 0; free_irq(dev->irq,dev); leave("xircom_close"); return 0; } static struct net_device_stats *xircom_get_stats(struct net_device *dev) { struct xircom_private *card = netdev_priv(dev); return &card->stats; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xircom_poll_controller(struct net_device *dev) { disable_irq(dev->irq); xircom_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static void initialize_card(struct xircom_private *card) { unsigned 
int val; unsigned long flags; enter("initialize_card"); spin_lock_irqsave(&card->lock, flags); /* First: reset the card */ val = inl(card->io_port + CSR0); val |= 0x01; /* Software reset */ outl(val, card->io_port + CSR0); udelay(100); /* give the card some time to reset */ val = inl(card->io_port + CSR0); val &= ~0x01; /* disable Software reset */ outl(val, card->io_port + CSR0); val = 0; /* Value 0x00 is a safe and conservative value for the PCI configuration settings */ outl(val, card->io_port + CSR0); disable_all_interrupts(card); deactivate_receiver(card); deactivate_transmitter(card); spin_unlock_irqrestore(&card->lock, flags); leave("initialize_card"); } /* trigger_transmit causes the card to check for frames to be transmitted. This is accomplished by writing to the CSR1 port. The documentation claims that the act of writing is sufficient and that the value is ignored; I chose zero. */ static void trigger_transmit(struct xircom_private *card) { unsigned int val; enter("trigger_transmit"); val = 0; outl(val, card->io_port + CSR1); leave("trigger_transmit"); } /* trigger_receive causes the card to check for empty frames in the descriptor list in which packets can be received. This is accomplished by writing to the CSR2 port. The documentation claims that the act of writing is sufficient and that the value is ignored; I chose zero. */ static void trigger_receive(struct xircom_private *card) { unsigned int val; enter("trigger_receive"); val = 0; outl(val, card->io_port + CSR2); leave("trigger_receive"); } /* setup_descriptors initializes the send and receive buffers to be valid descriptors and programs the addresses into the card. 
*/ static void setup_descriptors(struct xircom_private *card) { u32 address; int i; enter("setup_descriptors"); BUG_ON(card->rx_buffer == NULL); BUG_ON(card->tx_buffer == NULL); /* Receive descriptors */ memset(card->rx_buffer, 0, 128); /* clear the descriptors */ for (i=0;i<NUMDESCRIPTORS;i++ ) { /* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */ card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000); /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ card->rx_buffer[i*4 + 1] = cpu_to_le32(1536); if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); /* Rx Descr2: address of the buffer we store the buffer at the 2nd half of the page */ address = card->rx_dma_handle; card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); /* Rx Desc3: address of 2nd buffer -> 0 */ card->rx_buffer[i*4 + 3] = 0; } wmb(); /* Write the receive descriptor ring address to the card */ address = card->rx_dma_handle; outl(address, card->io_port + CSR3); /* Receive descr list address */ /* transmit descriptors */ memset(card->tx_buffer, 0, 128); /* clear the descriptors */ for (i=0;i<NUMDESCRIPTORS;i++ ) { /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */ card->tx_buffer[i*4 + 0] = 0x00000000; /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ card->tx_buffer[i*4 + 1] = cpu_to_le32(1536); if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); /* Tx Descr2: address of the buffer we store the buffer at the 2nd half of the page */ address = card->tx_dma_handle; card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); /* Tx Desc3: address of 2nd buffer -> 0 */ card->tx_buffer[i*4 + 3] = 0; } wmb(); /* wite the transmit descriptor ring to the card */ address = card->tx_dma_handle; outl(address, card->io_port + CSR4); /* xmit descr list address */ leave("setup_descriptors"); } /* remove_descriptors informs the 
card the descriptors are no longer valid by setting the address in the card to 0x00. */ static void remove_descriptors(struct xircom_private *card) { unsigned int val; enter("remove_descriptors"); val = 0; outl(val, card->io_port + CSR3); /* Receive descriptor address */ outl(val, card->io_port + CSR4); /* Send descriptor address */ leave("remove_descriptors"); } /* link_status_changed returns 1 if the card has indicated that the link status has changed. The new link status has to be read from CSR12. This function also clears the status-bit. */ static int link_status_changed(struct xircom_private *card) { unsigned int val; enter("link_status_changed"); val = inl(card->io_port + CSR5); /* Status register */ if ((val & (1 << 27)) == 0) { /* no change */ leave("link_status_changed - nochange"); return 0; } /* clear the event by writing a 1 to the bit in the status register. */ val = (1 << 27); outl(val, card->io_port + CSR5); leave("link_status_changed - changed"); return 1; } /* transmit_active returns 1 if the transmitter on the card is in a non-stopped state. */ static int transmit_active(struct xircom_private *card) { unsigned int val; enter("transmit_active"); val = inl(card->io_port + CSR5); /* Status register */ if ((val & (7 << 20)) == 0) { /* transmitter disabled */ leave("transmit_active - inactive"); return 0; } leave("transmit_active - active"); return 1; } /* receive_active returns 1 if the receiver on the card is in a non-stopped state. */ static int receive_active(struct xircom_private *card) { unsigned int val; enter("receive_active"); val = inl(card->io_port + CSR5); /* Status register */ if ((val & (7 << 17)) == 0) { /* receiver disabled */ leave("receive_active - inactive"); return 0; } leave("receive_active - active"); return 1; } /* activate_receiver enables the receiver on the card. Before being allowed to active the receiver, the receiver must be completely de-activated. 
To achieve this, this code actually disables the receiver first; then it waits for the receiver to become inactive, then it activates the receiver and then it waits for the receiver to be active. must be called with the lock held and interrupts disabled. */ static void activate_receiver(struct xircom_private *card) { unsigned int val; int counter; enter("activate_receiver"); val = inl(card->io_port + CSR6); /* Operation mode */ /* If the "active" bit is set and the receiver is already active, no need to do the expensive thing */ if ((val&2) && (receive_active(card))) return; val = val & ~2; /* disable the receiver */ outl(val, card->io_port + CSR6); counter = 10; while (counter > 0) { if (!receive_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n"); } /* enable the receiver */ val = inl(card->io_port + CSR6); /* Operation mode */ val = val | 2; /* enable the receiver */ outl(val, card->io_port + CSR6); /* now wait for the card to activate again */ counter = 10; while (counter > 0) { if (receive_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n"); } leave("activate_receiver"); } /* deactivate_receiver disables the receiver on the card. To achieve this this code disables the receiver first; then it waits for the receiver to become inactive. must be called with the lock held and interrupts disabled. 
*/ static void deactivate_receiver(struct xircom_private *card) { unsigned int val; int counter; enter("deactivate_receiver"); val = inl(card->io_port + CSR6); /* Operation mode */ val = val & ~2; /* disable the receiver */ outl(val, card->io_port + CSR6); counter = 10; while (counter > 0) { if (!receive_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n"); } leave("deactivate_receiver"); } /* activate_transmitter enables the transmitter on the card. Before being allowed to active the transmitter, the transmitter must be completely de-activated. To achieve this, this code actually disables the transmitter first; then it waits for the transmitter to become inactive, then it activates the transmitter and then it waits for the transmitter to be active again. must be called with the lock held and interrupts disabled. */ static void activate_transmitter(struct xircom_private *card) { unsigned int val; int counter; enter("activate_transmitter"); val = inl(card->io_port + CSR6); /* Operation mode */ /* If the "active" bit is set and the receiver is already active, no need to do the expensive thing */ if ((val&(1<<13)) && (transmit_active(card))) return; val = val & ~(1 << 13); /* disable the transmitter */ outl(val, card->io_port + CSR6); counter = 10; while (counter > 0) { if (!transmit_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n"); } /* enable the transmitter */ val = inl(card->io_port + CSR6); /* Operation mode */ val = val | (1 << 13); /* enable the transmitter */ outl(val, card->io_port + CSR6); /* now wait for the card to activate again */ counter = 10; while (counter > 0) { if (transmit_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n"); } leave("activate_transmitter"); } /* 
deactivate_transmitter disables the transmitter on the card. To achieve this this code disables the transmitter first; then it waits for the transmitter to become inactive. must be called with the lock held and interrupts disabled. */ static void deactivate_transmitter(struct xircom_private *card) { unsigned int val; int counter; enter("deactivate_transmitter"); val = inl(card->io_port + CSR6); /* Operation mode */ val = val & ~2; /* disable the transmitter */ outl(val, card->io_port + CSR6); counter = 20; while (counter > 0) { if (!transmit_active(card)) break; /* wait a while */ udelay(50); counter--; if (counter <= 0) printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n"); } leave("deactivate_transmitter"); } /* enable_transmit_interrupt enables the transmit interrupt must be called with the lock held and interrupts disabled. */ static void enable_transmit_interrupt(struct xircom_private *card) { unsigned int val; enter("enable_transmit_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val |= 1; /* enable the transmit interrupt */ outl(val, card->io_port + CSR7); leave("enable_transmit_interrupt"); } /* enable_receive_interrupt enables the receive interrupt must be called with the lock held and interrupts disabled. */ static void enable_receive_interrupt(struct xircom_private *card) { unsigned int val; enter("enable_receive_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val = val | (1 << 6); /* enable the receive interrupt */ outl(val, card->io_port + CSR7); leave("enable_receive_interrupt"); } /* enable_link_interrupt enables the link status change interrupt must be called with the lock held and interrupts disabled. 
*/ static void enable_link_interrupt(struct xircom_private *card) { unsigned int val; enter("enable_link_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val = val | (1 << 27); /* enable the link status chage interrupt */ outl(val, card->io_port + CSR7); leave("enable_link_interrupt"); } /* disable_all_interrupts disables all interrupts must be called with the lock held and interrupts disabled. */ static void disable_all_interrupts(struct xircom_private *card) { unsigned int val; enter("enable_all_interrupts"); val = 0; /* disable all interrupts */ outl(val, card->io_port + CSR7); leave("disable_all_interrupts"); } /* enable_common_interrupts enables several weird interrupts must be called with the lock held and interrupts disabled. */ static void enable_common_interrupts(struct xircom_private *card) { unsigned int val; enter("enable_link_interrupt"); val = inl(card->io_port + CSR7); /* Interrupt enable register */ val |= (1<<16); /* Normal Interrupt Summary */ val |= (1<<15); /* Abnormal Interrupt Summary */ val |= (1<<13); /* Fatal bus error */ val |= (1<<8); /* Receive Process Stopped */ val |= (1<<7); /* Receive Buffer Unavailable */ val |= (1<<5); /* Transmit Underflow */ val |= (1<<2); /* Transmit Buffer Unavailable */ val |= (1<<1); /* Transmit Process Stopped */ outl(val, card->io_port + CSR7); leave("enable_link_interrupt"); } /* enable_promisc starts promisc mode must be called with the lock held and interrupts disabled. */ static int enable_promisc(struct xircom_private *card) { unsigned int val; enter("enable_promisc"); val = inl(card->io_port + CSR6); val = val | (1 << 6); outl(val, card->io_port + CSR6); leave("enable_promisc"); return 1; } /* link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. 
Must be called in locked state with interrupts disabled */ static int link_status(struct xircom_private *card) { unsigned int val; enter("link_status"); val = inb(card->io_port + CSR12); if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ return 10; if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ return 100; /* If we get here -> no link at all */ leave("link_status"); return 0; } /* read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. This function will take the spinlock itself and can, as a result, not be called with the lock helt. */ static void read_mac_address(struct xircom_private *card) { unsigned char j, tuple, link, data_id, data_count; unsigned long flags; int i; DECLARE_MAC_BUF(mac); enter("read_mac_address"); spin_lock_irqsave(&card->lock, flags); outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ for (i = 0x100; i < 0x1f7; i += link + 2) { outl(i, card->io_port + CSR10); tuple = inl(card->io_port + CSR9) & 0xff; outl(i + 1, card->io_port + CSR10); link = inl(card->io_port + CSR9) & 0xff; outl(i + 2, card->io_port + CSR10); data_id = inl(card->io_port + CSR9) & 0xff; outl(i + 3, card->io_port + CSR10); data_count = inl(card->io_port + CSR9) & 0xff; if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { /* * This is it. We have the data we want. */ for (j = 0; j < 6; j++) { outl(i + j + 4, card->io_port + CSR10); card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; } break; } else if (link == 0) { break; } } spin_unlock_irqrestore(&card->lock, flags); pr_debug(" %s\n", print_mac(mac, card->dev->dev_addr)); leave("read_mac_address"); } /* transceiver_voodoo() enables the external UTP plug thingy. it's called voodoo as I stole this code and cannot cross-reference it with the specification. 
*/ static void transceiver_voodoo(struct xircom_private *card) { unsigned long flags; enter("transceiver_voodoo"); /* disable all powermanagement */ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); setup_descriptors(card); spin_lock_irqsave(&card->lock, flags); outl(0x0008, card->io_port + CSR15); udelay(25); outl(0xa8050000, card->io_port + CSR15); udelay(25); outl(0xa00f0000, card->io_port + CSR15); udelay(25); spin_unlock_irqrestore(&card->lock, flags); netif_start_queue(card->dev); leave("transceiver_voodoo"); } static void xircom_up(struct xircom_private *card) { unsigned long flags; int i; enter("xircom_up"); /* disable all powermanagement */ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); setup_descriptors(card); spin_lock_irqsave(&card->lock, flags); enable_link_interrupt(card); enable_transmit_interrupt(card); enable_receive_interrupt(card); enable_common_interrupts(card); enable_promisc(card); /* The card can have received packets already, read them away now */ for (i=0;i<NUMDESCRIPTORS;i++) investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]); spin_unlock_irqrestore(&card->lock, flags); trigger_receive(card); trigger_transmit(card); netif_start_queue(card->dev); leave("xircom_up"); } /* Bufferoffset is in BYTES */ static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset) { int status; enter("investigate_read_descriptor"); status = le32_to_cpu(card->rx_buffer[4*descnr]); if ((status > 0)) { /* packet received */ /* TODO: discard error packets */ short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */ struct sk_buff *skb; if (pkt_len > 1518) { printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len); pkt_len = 1518; } skb = dev_alloc_skb(pkt_len + 2); if (skb == NULL) { card->stats.rx_dropped++; goto out; } skb_reserve(skb, 2); skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len); 
skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; card->stats.rx_packets++; card->stats.rx_bytes += pkt_len; out: /* give the buffer back to the card */ card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000); trigger_receive(card); } leave("investigate_read_descriptor"); } /* Bufferoffset is in BYTES */ static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset) { int status; enter("investigate_write_descriptor"); status = le32_to_cpu(card->tx_buffer[4*descnr]); #if 0 if (status & 0x8000) { /* Major error */ printk(KERN_ERR "Major transmit error status %x \n", status); card->tx_buffer[4*descnr] = 0; netif_wake_queue (dev); } #endif if (status > 0) { /* bit 31 is 0 when done */ if (card->tx_skb[descnr]!=NULL) { card->stats.tx_bytes += card->tx_skb[descnr]->len; dev_kfree_skb_irq(card->tx_skb[descnr]); } card->tx_skb[descnr] = NULL; /* Bit 8 in the status field is 1 if there was a collision */ if (status&(1<<8)) card->stats.collisions++; card->tx_buffer[4*descnr] = 0; /* descriptor is free again */ netif_wake_queue (dev); card->stats.tx_packets++; } leave("investigate_write_descriptor"); } static int __init xircom_init(void) { return pci_register_driver(&xircom_ops); } static void __exit xircom_exit(void) { pci_unregister_driver(&xircom_ops); } module_init(xircom_init) module_exit(xircom_exit)
gpl-2.0
c0ffee/linux-2.6.34-ts471x
drivers/ata/pata_jmicron.c
49
4693
/* * pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the * PATA port of the controller. The SATA ports are * driven by AHCI in the usual configuration although * this driver can handle other setups if we need it. * * (c) 2006 Red Hat */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_jmicron" #define DRV_VERSION "0.1.5" typedef enum { PORT_PATA0 = 0, PORT_PATA1 = 1, PORT_SATA = 2, } port_type; /** * jmicron_pre_reset - check for 40/80 pin * @link: ATA link * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. * * On the Jmicron 361/363 there is a single PATA port that can be mapped * either as primary or secondary (or neither). We don't do any policy * and setup here. We assume that has been done by init_one and the * BIOS. */ static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 control; u32 control5; int port_mask = 1<< (4 * ap->port_no); int port = ap->port_no; port_type port_map[2]; /* Check if our port is enabled */ pci_read_config_dword(pdev, 0x40, &control); if ((control & port_mask) == 0) return -ENOENT; /* There are two basic mappings. 
One has the two SATA ports merged as master/slave and the secondary as PATA, the other has only the SATA port mapped */ if (control & (1 << 23)) { port_map[0] = PORT_SATA; port_map[1] = PORT_PATA0; } else { port_map[0] = PORT_SATA; port_map[1] = PORT_SATA; } /* The 365/366 may have this bit set to map the second PATA port as the internal primary channel */ pci_read_config_dword(pdev, 0x80, &control5); if (control5 & (1<<24)) port_map[0] = PORT_PATA1; /* The two ports may then be logically swapped by the firmware */ if (control & (1 << 22)) port = port ^ 1; /* * Now we know which physical port we are talking about we can * actually do our cable checking etc. Thankfully we don't need * to do the plumbing for other cases. */ switch (port_map[port]) { case PORT_PATA0: if ((control & (1 << 5)) == 0) return -ENOENT; if (control & (1 << 3)) /* 40/80 pin primary */ ap->cbl = ATA_CBL_PATA40; else ap->cbl = ATA_CBL_PATA80; break; case PORT_PATA1: /* Bit 21 is set if the port is enabled */ if ((control5 & (1 << 21)) == 0) return -ENOENT; if (control5 & (1 << 19)) /* 40/80 pin secondary */ ap->cbl = ATA_CBL_PATA40; else ap->cbl = ATA_CBL_PATA80; break; case PORT_SATA: ap->cbl = ATA_CBL_SATA; break; } return ata_sff_prereset(link, deadline); } /* No PIO or DMA methods needed for this device */ static struct scsi_host_template jmicron_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations jmicron_ops = { .inherits = &ata_bmdma_port_ops, .prereset = jmicron_pre_reset, }; /** * jmicron_init_one - Register Jmicron ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in jmicron_pci_tbl matching with @pdev * * Called from kernel PCI layer. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. 
*/ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &jmicron_ops, }; const struct ata_port_info *ppi[] = { &info, NULL }; return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } static const struct pci_device_id jmicron_pci_tbl[] = { { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 }, { } /* terminate list */ }; static struct pci_driver jmicron_pci_driver = { .name = DRV_NAME, .id_table = jmicron_pci_tbl, .probe = jmicron_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init jmicron_init(void) { return pci_register_driver(&jmicron_pci_driver); } static void __exit jmicron_exit(void) { pci_unregister_driver(&jmicron_pci_driver); } module_init(jmicron_init); module_exit(jmicron_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
drhonk/Bali_SK4G
arch/arm/common/clkdev.c
561
3511
/*
 *  arch/arm/common/clkdev.c
 *
 *  Copyright (C) 2008 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Helper for the clk API to assist looking up a struct clk.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/clk.h>

#include <asm/clkdev.h>
#include <mach/clkdev.h>

/* Global registry of clk_lookup entries; guarded by clocks_mutex. */
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);

/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 *
 * Caller must hold clocks_mutex.  Returns NULL if nothing matches.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clocks, node) {
		match = 0;
		if (p->dev_id) {
			/* entry requires a device ID: caller must supply one
			 * and it must match exactly */
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			/* entry requires a connection ID: same rule */
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		/* keep the most specific (highest-scoring) entry so far */
		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}

/*
 * clk_get_sys - look up and obtain a reference to a clock by string IDs.
 * @dev_id: device name, or NULL for a wildcard match
 * @con_id: connection ID, or NULL for a wildcard match
 *
 * Returns the clock with a reference taken via __clk_get(), or
 * ERR_PTR(-ENOENT) if no entry matched or the reference could not
 * be obtained.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	struct clk *clk;

	mutex_lock(&clocks_mutex);
	clk = clk_find(dev_id, con_id);
	/* found an entry, but treat a failed __clk_get() as not-found */
	if (clk && !__clk_get(clk))
		clk = NULL;
	mutex_unlock(&clocks_mutex);

	return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(clk_get_sys);

/*
 * clk_get - look up a clock for a struct device.
 * @dev: device the clock belongs to, or NULL
 * @con_id: connection ID, or NULL
 *
 * Thin wrapper around clk_get_sys() that derives the device-ID
 * string from the device name.
 */
struct clk *clk_get(struct device *dev, const char *con_id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;

	return clk_get_sys(dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);

/* clk_put - release a reference obtained with clk_get()/clk_get_sys(). */
void clk_put(struct clk *clk)
{
	__clk_put(clk);
}
EXPORT_SYMBOL(clk_put);

/* clkdev_add - register a lookup entry in the global clock table. */
void clkdev_add(struct clk_lookup *cl)
{
	mutex_lock(&clocks_mutex);
	list_add_tail(&cl->node, &clocks);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clkdev_add);

/* Buffer sizes for the ID strings embedded in a clk_lookup_alloc. */
#define MAX_DEV_ID	20
#define MAX_CON_ID	16

/*
 * A clk_lookup plus inline storage for its ID strings, so a single
 * kzalloc()/kfree() covers the whole dynamically-created entry.
 */
struct clk_lookup_alloc {
	struct clk_lookup cl;
	char	dev_id[MAX_DEV_ID];
	char	con_id[MAX_CON_ID];
};

/*
 * clkdev_alloc - allocate and fill a clk_lookup entry.
 * @clk: the clock the entry resolves to
 * @con_id: connection ID to copy, or NULL
 * @dev_fmt: printf-style format (plus varargs) for the device ID,
 *           or NULL for a wildcard device match
 *
 * Returns a pointer to the embedded clk_lookup (suitable for
 * clkdev_add()/clkdev_drop()), or NULL on allocation failure.
 * NOTE(review): dev_id/con_id longer than the fixed buffers are
 * silently truncated by strlcpy()/vscnprintf().
 */
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
	const char *dev_fmt, ...)
{
	struct clk_lookup_alloc *cla;

	cla = kzalloc(sizeof(*cla), GFP_KERNEL);
	if (!cla)
		return NULL;

	cla->cl.clk = clk;
	if (con_id) {
		strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
		cla->cl.con_id = cla->con_id;
	}

	if (dev_fmt) {
		va_list ap;

		va_start(ap, dev_fmt);
		vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
		cla->cl.dev_id = cla->dev_id;
		va_end(ap);
	}

	return &cla->cl;
}
EXPORT_SYMBOL(clkdev_alloc);

/*
 * clk_add_alias - register an additional lookup name for a clock.
 * @alias: new connection ID
 * @alias_dev_name: new device ID (may be NULL)
 * @id: existing connection ID used to find the clock
 * @dev: device used to find the clock (may be NULL)
 *
 * Returns 0 on success, PTR_ERR() from the failed lookup, or
 * -ENODEV if the new entry could not be allocated.
 */
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
	struct device *dev)
{
	struct clk *r = clk_get(dev, id);
	struct clk_lookup *l;

	if (IS_ERR(r))
		return PTR_ERR(r);

	l = clkdev_alloc(r, alias, alias_dev_name);
	/* the lookup only needed to resolve the clk pointer; the alias
	 * entry does not keep its own reference */
	clk_put(r);
	if (!l)
		return -ENODEV;
	clkdev_add(l);
	return 0;
}
EXPORT_SYMBOL(clk_add_alias);

/*
 * clkdev_drop - remove a clock dynamically allocated
 *
 * Unlinks the entry from the registry and frees it; only valid for
 * entries created with clkdev_alloc().
 */
void clkdev_drop(struct clk_lookup *cl)
{
	mutex_lock(&clocks_mutex);
	list_del(&cl->node);
	mutex_unlock(&clocks_mutex);
	kfree(cl);
}
EXPORT_SYMBOL(clkdev_drop);
gpl-2.0
kodos96/backport
drivers/rtc/rtc-m48t86.c
1585
5819
/*
 * ST M48T86 / Dallas DS12887 RTC driver
 * Copyright (c) 2006 Tower Technologies
 *
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This drivers only supports the clock running in BCD and 24H mode.
 * If it will be ever adapted to binary and 12H mode, care must be taken
 * to not introduce bugs.
 */

#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/m48t86.h>
#include <linux/bcd.h>

/* Register map of the M48T86 / DS12887.  Access goes through the
 * platform-supplied readbyte()/writebyte() ops, not MMIO. */
#define M48T86_REG_SEC		0x00
#define M48T86_REG_SECALRM	0x01
#define M48T86_REG_MIN		0x02
#define M48T86_REG_MINALRM	0x03
#define M48T86_REG_HOUR		0x04
#define M48T86_REG_HOURALRM	0x05
#define M48T86_REG_DOW		0x06 /* 1 = sunday */
#define M48T86_REG_DOM		0x07
#define M48T86_REG_MONTH	0x08 /* 1 - 12 */
#define M48T86_REG_YEAR		0x09 /* 0 - 99 */
#define M48T86_REG_A		0x0A
#define M48T86_REG_B		0x0B
#define M48T86_REG_C		0x0C
#define M48T86_REG_D		0x0D

#define M48T86_REG_B_H24	(1 << 1)	/* 24-hour mode */
#define M48T86_REG_B_DM		(1 << 2)	/* data mode: 1 = binary, 0 = BCD */
#define M48T86_REG_B_SET	(1 << 7)	/* inhibit update while setting */
#define M48T86_REG_D_VRT	(1 << 7)	/* valid RAM and time (battery ok) */

#define DRV_VERSION "0.1"

/*
 * Read the current time from the chip into @tm, decoding either
 * binary or BCD register contents depending on the DM bit of
 * register B.  Always returns 0.
 */
static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char reg;
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t86_ops *ops = pdev->dev.platform_data;

	reg = ops->readbyte(M48T86_REG_B);

	if (reg & M48T86_REG_B_DM) {
		/* data (binary) mode */
		tm->tm_sec	= ops->readbyte(M48T86_REG_SEC);
		tm->tm_min	= ops->readbyte(M48T86_REG_MIN);
		/* mask off the 12h AM/PM flag in bit 7 (and bit 6) */
		tm->tm_hour	= ops->readbyte(M48T86_REG_HOUR) & 0x3F;
		tm->tm_mday	= ops->readbyte(M48T86_REG_DOM);
		/* tm_mon is 0-11 */
		tm->tm_mon	= ops->readbyte(M48T86_REG_MONTH) - 1;
		tm->tm_year	= ops->readbyte(M48T86_REG_YEAR) + 100;
		tm->tm_wday	= ops->readbyte(M48T86_REG_DOW);
	} else {
		/* bcd mode */
		tm->tm_sec	= bcd2bin(ops->readbyte(M48T86_REG_SEC));
		tm->tm_min	= bcd2bin(ops->readbyte(M48T86_REG_MIN));
		tm->tm_hour	= bcd2bin(ops->readbyte(M48T86_REG_HOUR) & 0x3F);
		tm->tm_mday	= bcd2bin(ops->readbyte(M48T86_REG_DOM));
		/* tm_mon is 0-11 */
		tm->tm_mon	= bcd2bin(ops->readbyte(M48T86_REG_MONTH)) - 1;
		tm->tm_year	= bcd2bin(ops->readbyte(M48T86_REG_YEAR)) + 100;
		tm->tm_wday	= bcd2bin(ops->readbyte(M48T86_REG_DOW));
	}

	/* correct the hour if the clock is in 12h mode */
	if (!(reg & M48T86_REG_B_H24))
		if (ops->readbyte(M48T86_REG_HOUR) & 0x80)
			tm->tm_hour += 12;

	return 0;
}

/*
 * Program the chip with @tm.  The SET bit of register B is raised
 * around the register writes to keep the chip from updating
 * mid-sequence, and 24h mode is forced.  Always returns 0.
 */
static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char reg;
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t86_ops *ops = pdev->dev.platform_data;

	reg = ops->readbyte(M48T86_REG_B);

	/* update flag and 24h mode */
	reg |= M48T86_REG_B_SET | M48T86_REG_B_H24;
	ops->writebyte(reg, M48T86_REG_B);

	if (reg & M48T86_REG_B_DM) {
		/* data (binary) mode */
		ops->writebyte(tm->tm_sec, M48T86_REG_SEC);
		ops->writebyte(tm->tm_min, M48T86_REG_MIN);
		ops->writebyte(tm->tm_hour, M48T86_REG_HOUR);
		ops->writebyte(tm->tm_mday, M48T86_REG_DOM);
		ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH);
		ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR);
		ops->writebyte(tm->tm_wday, M48T86_REG_DOW);
	} else {
		/* bcd mode */
		ops->writebyte(bin2bcd(tm->tm_sec), M48T86_REG_SEC);
		ops->writebyte(bin2bcd(tm->tm_min), M48T86_REG_MIN);
		ops->writebyte(bin2bcd(tm->tm_hour), M48T86_REG_HOUR);
		ops->writebyte(bin2bcd(tm->tm_mday), M48T86_REG_DOM);
		ops->writebyte(bin2bcd(tm->tm_mon + 1), M48T86_REG_MONTH);
		ops->writebyte(bin2bcd(tm->tm_year % 100), M48T86_REG_YEAR);
		ops->writebyte(bin2bcd(tm->tm_wday), M48T86_REG_DOW);
	}

	/* update ended */
	reg &= ~M48T86_REG_B_SET;
	ops->writebyte(reg, M48T86_REG_B);

	return 0;
}

/*
 * /proc hook: report the chip's data mode (binary vs BCD) and the
 * battery/oscillator status from register D.
 */
static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq)
{
	unsigned char reg;
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t86_ops *ops = pdev->dev.platform_data;

	reg = ops->readbyte(M48T86_REG_B);

	seq_printf(seq, "mode\t\t: %s\n",
		 (reg & M48T86_REG_B_DM) ? "binary" : "bcd");

	reg = ops->readbyte(M48T86_REG_D);

	seq_printf(seq, "battery\t\t: %s\n",
		 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");

	return 0;
}

static const struct rtc_class_ops m48t86_rtc_ops = {
	.read_time	= m48t86_rtc_read_time,
	.set_time	= m48t86_rtc_set_time,
	.proc		= m48t86_rtc_proc,
};

/*
 * Platform probe: register the RTC class device and log the initial
 * battery state.  Returns 0 or the rtc_device_register() error.
 */
static int __devinit m48t86_rtc_probe(struct platform_device *dev)
{
	unsigned char reg;
	struct m48t86_ops *ops = dev->dev.platform_data;
	struct rtc_device *rtc = rtc_device_register("m48t86",
				&dev->dev, &m48t86_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	platform_set_drvdata(dev, rtc);

	/* read battery status */
	reg = ops->readbyte(M48T86_REG_D);
	dev_info(&dev->dev, "battery %s\n",
		(reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");

	return 0;
}

/* Platform remove: tear down the RTC class device. */
static int __devexit m48t86_rtc_remove(struct platform_device *dev)
{
	struct rtc_device *rtc = platform_get_drvdata(dev);

 	if (rtc)
		rtc_device_unregister(rtc);

	platform_set_drvdata(dev, NULL);

	return 0;
}

static struct platform_driver m48t86_rtc_platform_driver = {
	.driver		= {
		.name	= "rtc-m48t86",
		.owner	= THIS_MODULE,
	},
	.probe		= m48t86_rtc_probe,
	.remove		= __devexit_p(m48t86_rtc_remove),
};

static int __init m48t86_rtc_init(void)
{
	return platform_driver_register(&m48t86_rtc_platform_driver);
}

static void __exit m48t86_rtc_exit(void)
{
	platform_driver_unregister(&m48t86_rtc_platform_driver);
}

MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t86");

module_init(m48t86_rtc_init);
module_exit(m48t86_rtc_exit);
gpl-2.0
ashishtanwer/NFTable-porting-on-Android-Goldfish
drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
2097
2864
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <engine/software.h>
#include <engine/disp.h>

#include <core/class.h>

#include "nv50.h"

/* Display channel classes exposed by the NVF0 display engine; the
 * implementations (ofuncs) are shared with the NVD0 generation. */
static struct nouveau_oclass
nvf0_disp_sclass[] = {
	{ NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
	{ NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
	{ NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
	{ NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
	{ NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
	{}
};

/* Top-level display object class, again backed by NVD0 functions. */
static struct nouveau_oclass
nvf0_disp_base_oclass[] = {
	{ NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
	{}
};

/*
 * Constructor for the NVF0 display engine object: create the base
 * display engine, then wire up the class tables, interrupt handling,
 * and per-output (DAC/SOR) method pointers.  The head count is read
 * from hardware register 0x022448.
 *
 * Returns 0 on success or the error from nouveau_disp_create().
 */
static int
nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv;
	int heads = nv_rd32(parent, 0x022448);
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, heads,
				  "PDISP", "display", &priv);
	/* publish the object even on failure so the core can clean up */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nvf0_disp_base_oclass;
	nv_engine(priv)->cclass = &nv50_disp_cclass;
	nv_subdev(priv)->intr = nvd0_disp_intr;
	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
	priv->sclass = nvf0_disp_sclass;
	priv->head.nr = heads;
	priv->dac.nr = 3;
	priv->sor.nr = 4;
	/* DAC/SOR power and sensing reuse the NV50-era helpers; HDA,
	 * HDMI and DisplayPort control come from the NVD0 code. */
	priv->dac.power = nv50_dac_power;
	priv->dac.sense = nv50_dac_sense;
	priv->sor.power = nv50_sor_power;
	priv->sor.hda_eld = nvd0_hda_eld;
	priv->sor.hdmi = nvd0_hdmi_ctrl;
	priv->sor.dp = &nvd0_sor_dp_func;
	return 0;
}

/* Engine class descriptor registered with the nouveau object system;
 * dtor/init/fini fall through to the generic display implementations. */
struct nouveau_oclass
nvf0_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x92),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvf0_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
gpl-2.0
glewarne/S6-UniBase
drivers/usb/gadget/ether.c
2097
11458
/*
 * ether.c -- Ethernet gadget driver, with CDC and non-CDC options
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>

/* Normalize USB_ETH_RNDIS: clear any stale definition, then define it
 * iff the kernel config enables RNDIS support. */
#if defined USB_ETH_RNDIS
#  undef USB_ETH_RNDIS
#endif
#ifdef CONFIG_USB_ETH_RNDIS
#  define USB_ETH_RNDIS y
#endif

#include "u_ether.h"


/*
 * Ethernet gadget driver -- with CDC and non-CDC options
 * Builds on hardware support for a full duplex link.
 *
 * CDC Ethernet is the standard USB solution for sending Ethernet frames
 * using USB.  Real hardware tends to use the same framing protocol but look
 * different for control features.  This driver strongly prefers to use
 * this USB-IF standard as its open-systems interoperability solution;
 * most host side USB stacks (except from Microsoft) support it.
 *
 * This is sometimes called "CDC ECM" (Ethernet Control Model) to support
 * TLA-soup.  "CDC ACM" (Abstract Control Model) is for modems, and a new
 * "CDC EEM" (Ethernet Emulation Model) is starting to spread.
 *
 * There's some hardware that can't talk CDC ECM.  We make that hardware
 * implement a "minimalist" vendor-agnostic CDC core:  same framing, but
 * link-level setup only requires activating the configuration.  Only the
 * endpoint descriptors, and product/vendor IDs, are relevant; no control
 * operations are available.  Linux supports it, but other host operating
 * systems may not.  (This is a subset of CDC Ethernet.)
 *
 * It turns out that if you add a few descriptors to that "CDC Subset",
 * (Windows) host side drivers from MCCI can treat it as one submode of
 * a proprietary scheme called "SAFE" ... without needing to know about
 * specific product/vendor IDs.  So we do that, making it easier to use
 * those MS-Windows drivers.  Those added descriptors make it resemble a
 * CDC MDLM device, but they don't change device behavior at all.  (See
 * MCCI Engineering report 950198 "SAFE Networking Functions".)
 *
 * A third option is also in use.  Rather than CDC Ethernet, or something
 * simpler, Microsoft pushes their own approach: RNDIS.  The published
 * RNDIS specs are ambiguous and appear to be incomplete, and are also
 * needlessly complex.  They borrow more from CDC ACM than CDC ECM.
 */

#define DRIVER_DESC		"Ethernet Gadget"
#define DRIVER_VERSION		"Memorial Day 2008"

#ifdef USB_ETH_RNDIS
#define PREFIX			"RNDIS/"
#else
#define PREFIX			""
#endif

/*
 * This driver aims for interoperability by using CDC ECM unless
 *
 *		can_support_ecm()
 *
 * returns false, in which case it supports the CDC Subset.  By default,
 * that returns true; most hardware has no problems with CDC ECM, that's
 * a good default.  Previous versions of this driver had no default; this
 * version changes that, removing overhead for new controller support.
 *
 *	IF YOUR HARDWARE CAN'T SUPPORT CDC ECM, UPDATE THAT ROUTINE!
 */

/* Compile-time RNDIS availability as a runtime-checkable predicate. */
static inline bool has_rndis(void)
{
#ifdef	USB_ETH_RNDIS
	return true;
#else
	return false;
#endif
}

/*-------------------------------------------------------------------------*/

/*
 * Kbuild is not very cooperative with respect to linking separately
 * compiled library objects into one module.  So for now we won't use
 * separate compilation ... ensuring init/exit sections work to shrink
 * the runtime footprint, and giving us at least some parts of what
 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
 */
#include "f_ecm.c"
#include "f_subset.c"
#ifdef	USB_ETH_RNDIS
#include "f_rndis.c"
#include "rndis.c"
#endif
#include "f_eem.c"
#include "u_ether.c"

/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();

/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
 * Instead:  allocate your own, using normal USB-IF procedures.
 */

/* Thanks to NetChip Technologies for donating this product ID.
 * It's for devices with only CDC Ethernet configurations.
 */
#define CDC_VENDOR_NUM		0x0525	/* NetChip */
#define CDC_PRODUCT_NUM		0xa4a1	/* Linux-USB Ethernet Gadget */

/* For hardware that can't talk CDC, we use the same vendor ID that
 * ARM Linux has used for ethernet-over-usb, both with sa1100 and
 * with pxa250.  We're protocol-compatible, if the host-side drivers
 * use the endpoint descriptors.  bcdDevice (version) is nonzero, so
 * drivers that need to hard-wire endpoint numbers have a hook.
 *
 * The protocol is a minimal subset of CDC Ether, which works on any bulk
 * hardware that's not deeply broken ... even on hardware that can't talk
 * RNDIS (like SA-1100, with no interrupt endpoint, or anything that
 * doesn't handle control-OUT).
 */
#define	SIMPLE_VENDOR_NUM	0x049f
#define	SIMPLE_PRODUCT_NUM	0x505a

/* For hardware that can talk RNDIS and either of the above protocols,
 * use this ID ... the windows INF files will know it.  Unless it's
 * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose
 * the non-RNDIS configuration.
 */
#define RNDIS_VENDOR_NUM	0x0525	/* NetChip */
#define RNDIS_PRODUCT_NUM	0xa4a2	/* Ethernet/RNDIS Gadget */

/* For EEM gadgets */
#define EEM_VENDOR_NUM		0x1d6b	/* Linux Foundation */
#define EEM_PRODUCT_NUM		0x0102	/* EEM Gadget */

/*-------------------------------------------------------------------------*/

static struct usb_device_descriptor device_desc = {
	.bLength =		sizeof device_desc,
	.bDescriptorType =	USB_DT_DEVICE,

	.bcdUSB =		cpu_to_le16 (0x0200),

	.bDeviceClass =		USB_CLASS_COMM,
	.bDeviceSubClass =	0,
	.bDeviceProtocol =	0,
	/* .bMaxPacketSize0 = f(hardware) */

	/* Vendor and product id defaults change according to what configs
	 * we support.  (As does bNumConfigurations.)  These values can
	 * also be overridden by module parameters.
	 */
	.idVendor =		cpu_to_le16 (CDC_VENDOR_NUM),
	.idProduct =		cpu_to_le16 (CDC_PRODUCT_NUM),
	/* .bcdDevice = f(hardware) */
	/* .iManufacturer = DYNAMIC */
	/* .iProduct = DYNAMIC */
	/* NO SERIAL NUMBER */
	.bNumConfigurations =	1,
};

static struct usb_otg_descriptor otg_descriptor = {
	.bLength =		sizeof otg_descriptor,
	.bDescriptorType =	USB_DT_OTG,

	/* REVISIT SRP-only hardware is possible, although
	 * it would not be called "OTG" ...
	 */
	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
};

static const struct usb_descriptor_header *otg_desc[] = {
	(struct usb_descriptor_header *) &otg_descriptor,
	NULL,
};

/* String descriptors; IDs are assigned dynamically during bind. */
static struct usb_string strings_dev[] = {
	[USB_GADGET_MANUFACTURER_IDX].s = "",
	[USB_GADGET_PRODUCT_IDX].s = PREFIX DRIVER_DESC,
	[USB_GADGET_SERIAL_IDX].s = "",
	{  } /* end of list */
};

static struct usb_gadget_strings stringtab_dev = {
	.language	= 0x0409,	/* en-us */
	.strings	= strings_dev,
};

static struct usb_gadget_strings *dev_strings[] = {
	&stringtab_dev,
	NULL,
};

/* Host-side MAC address and the shared network-link state, both set
 * up in eth_bind() and used by the config callbacks below. */
static u8 hostaddr[ETH_ALEN];
static struct eth_dev *the_dev;
/*-------------------------------------------------------------------------*/

/*
 * We may not have an RNDIS configuration, but if we do it needs to be
 * the first one present.  That's to make Microsoft's drivers happy,
 * and to follow DOCSIS 1.0 (cable modem standard).
 */
static int __init rndis_do_config(struct usb_configuration *c)
{
	/* FIXME alloc iConfiguration string, set it in c->strings */

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	return rndis_bind_config(c, hostaddr, the_dev);
}

static struct usb_configuration rndis_config_driver = {
	.label			= "RNDIS",
	.bConfigurationValue	= 2,
	/* .iConfiguration = DYNAMIC */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_ETH_EEM
static bool use_eem = 1;
#else
static bool use_eem;
#endif
module_param(use_eem, bool, 0);
MODULE_PARM_DESC(use_eem, "use CDC EEM mode");

/*
 * We _always_ have an ECM, CDC Subset, or EEM configuration.
 */
static int __init eth_do_config(struct usb_configuration *c)
{
	/* FIXME alloc iConfiguration string, set it in c->strings */

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	/* EEM takes priority when requested; otherwise prefer ECM and
	 * fall back to the vendor-agnostic CDC Subset. */
	if (use_eem)
		return eem_bind_config(c, the_dev);
	else if (can_support_ecm(c->cdev->gadget))
		return ecm_bind_config(c, hostaddr, the_dev);
	else
		return geth_bind_config(c, hostaddr, the_dev);
}

static struct usb_configuration eth_config_driver = {
	/* .label = f(hardware) */
	.bConfigurationValue	= 1,
	/* .iConfiguration = DYNAMIC */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};

/*-------------------------------------------------------------------------*/

/*
 * Composite bind callback: set up the network link, pick IDs and
 * labels for the chosen protocol mix, allocate string IDs, and
 * register the configuration(s) — RNDIS first when present.
 */
static int __init eth_bind(struct usb_composite_dev *cdev)
{
	struct usb_gadget	*gadget = cdev->gadget;
	int			status;

	/* set up network link layer */
	the_dev = gether_setup(cdev->gadget, hostaddr);
	if (IS_ERR(the_dev))
		return PTR_ERR(the_dev);

	/* set up main config label and device descriptor */
	if (use_eem) {
		/* EEM */
		eth_config_driver.label = "CDC Ethernet (EEM)";
		device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM);
		device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM);
	} else if (can_support_ecm(cdev->gadget)) {
		/* ECM */
		eth_config_driver.label = "CDC Ethernet (ECM)";
	} else {
		/* CDC Subset */
		eth_config_driver.label = "CDC Subset/SAFE";

		device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM);
		device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM);
		if (!has_rndis())
			device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
	}

	if (has_rndis()) {
		/* RNDIS plus ECM-or-Subset */
		device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM);
		device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM);
		device_desc.bNumConfigurations = 2;
	}

	/* Allocate string descriptor numbers ... note that string
	 * contents can be overridden by the composite_dev glue.
	 */
	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto fail;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;

	/* register our configuration(s); RNDIS first, if it's used */
	if (has_rndis()) {
		status = usb_add_config(cdev, &rndis_config_driver,
				rndis_do_config);
		if (status < 0)
			goto fail;
	}

	status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
	if (status < 0)
		goto fail;

	usb_composite_overwrite_options(cdev, &coverwrite);
	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
			DRIVER_DESC);

	return 0;

fail:
	/* undo gether_setup() on any error path */
	gether_cleanup(the_dev);
	return status;
}

/* Composite unbind callback: tear down the network link. */
static int __exit eth_unbind(struct usb_composite_dev *cdev)
{
	gether_cleanup(the_dev);
	return 0;
}

static __refdata struct usb_composite_driver eth_driver = {
	.name		= "g_ether",
	.dev		= &device_desc,
	.strings	= dev_strings,
	.max_speed	= USB_SPEED_SUPER,
	.bind		= eth_bind,
	.unbind		= __exit_p(eth_unbind),
};

MODULE_DESCRIPTION(PREFIX DRIVER_DESC);
MODULE_AUTHOR("David Brownell, Benedikt Spanger");
MODULE_LICENSE("GPL");

static int __init init(void)
{
	return usb_composite_probe(&eth_driver);
}
module_init(init);

static void __exit cleanup(void)
{
	usb_composite_unregister(&eth_driver);
}
module_exit(cleanup);
gpl-2.0
cybernet/rhel7-kernel
kernel/drivers/media/platform/fsl-viu.c
2097
41218
/* * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved. * * Freescale VIU video driver * * Authors: Hongjun Chen <hong-jun.chen@freescale.com> * Porting to 2.6.35 by DENX Software Engineering, * Anatolij Gustschin <agust@denx.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/clk.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/videobuf-dma-contig.h> #define DRV_NAME "fsl_viu" #define VIU_VERSION "0.5.1" #define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */ #define VIU_VID_MEM_LIMIT 4 /* Video memory limit, in Mb */ /* I2C address of video decoder chip is 0x4A */ #define VIU_VIDEO_DECODER_ADDR 0x25 /* supported controls */ static struct v4l2_queryctrl viu_qctrl[] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, .default_value = 127, .flags = 0, }, { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 255, .step = 0x1, .default_value = 0x10, .flags = 0, }, { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Saturation", .minimum = 0, .maximum = 255, .step = 0x1, .default_value = 127, .flags = 0, }, { .id = V4L2_CID_HUE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Hue", .minimum = -128, .maximum = 127, .step = 0x1, .default_value = 0, .flags = 0, } }; static int qctl_regs[ARRAY_SIZE(viu_qctrl)]; static int info_level; #define dprintk(level, fmt, arg...) 
\ do { \ if (level <= info_level) \ printk(KERN_DEBUG "viu: " fmt , ## arg); \ } while (0) /* * Basic structures */ struct viu_fmt { char name[32]; u32 fourcc; /* v4l2 format id */ u32 pixelformat; int depth; }; static struct viu_fmt formats[] = { { .name = "RGB-16 (5/B-6/G-5/R)", .fourcc = V4L2_PIX_FMT_RGB565, .pixelformat = V4L2_PIX_FMT_RGB565, .depth = 16, }, { .name = "RGB-32 (A-R-G-B)", .fourcc = V4L2_PIX_FMT_RGB32, .pixelformat = V4L2_PIX_FMT_RGB32, .depth = 32, } }; struct viu_dev; struct viu_buf; /* buffer for one video frame */ struct viu_buf { /* common v4l buffer stuff -- must be first */ struct videobuf_buffer vb; struct viu_fmt *fmt; }; struct viu_dmaqueue { struct viu_dev *dev; struct list_head active; struct list_head queued; struct timer_list timeout; }; struct viu_status { u32 field_irq; u32 vsync_irq; u32 hsync_irq; u32 vstart_irq; u32 dma_end_irq; u32 error_irq; }; struct viu_reg { u32 status_cfg; u32 luminance; u32 chroma_r; u32 chroma_g; u32 chroma_b; u32 field_base_addr; u32 dma_inc; u32 picture_count; u32 req_alarm; u32 alpha; } __attribute__ ((packed)); struct viu_dev { struct v4l2_device v4l2_dev; struct mutex lock; spinlock_t slock; int users; struct device *dev; /* various device info */ struct video_device *vdev; struct viu_dmaqueue vidq; enum v4l2_field capfield; int field; int first; int dma_done; /* Hardware register area */ struct viu_reg *vr; /* Interrupt vector */ int irq; struct viu_status irqs; /* video overlay */ struct v4l2_framebuffer ovbuf; struct viu_fmt *ovfmt; unsigned int ovenable; enum v4l2_field ovfield; /* crop */ struct v4l2_rect crop_current; /* clock pointer */ struct clk *clk; /* decoder */ struct v4l2_subdev *decoder; v4l2_std_id std; }; struct viu_fh { struct viu_dev *dev; /* video capture */ struct videobuf_queue vb_vidq; spinlock_t vbq_lock; /* spinlock for the videobuf queue */ /* video overlay */ struct v4l2_window win; struct v4l2_clip clips[1]; /* video capture */ struct viu_fmt *fmt; int width, height, 
sizeimage; enum v4l2_buf_type type; }; static struct viu_reg reg_val; /* * Macro definitions of VIU registers */ /* STATUS_CONFIG register */ enum status_config { SOFT_RST = 1 << 0, ERR_MASK = 0x0f << 4, /* Error code mask */ ERR_NO = 0x00, /* No error */ ERR_DMA_V = 0x01 << 4, /* DMA in vertical active */ ERR_DMA_VB = 0x02 << 4, /* DMA in vertical blanking */ ERR_LINE_TOO_LONG = 0x04 << 4, /* Line too long */ ERR_TOO_MANG_LINES = 0x05 << 4, /* Too many lines in field */ ERR_LINE_TOO_SHORT = 0x06 << 4, /* Line too short */ ERR_NOT_ENOUGH_LINE = 0x07 << 4, /* Not enough lines in field */ ERR_FIFO_OVERFLOW = 0x08 << 4, /* FIFO overflow */ ERR_FIFO_UNDERFLOW = 0x09 << 4, /* FIFO underflow */ ERR_1bit_ECC = 0x0a << 4, /* One bit ECC error */ ERR_MORE_ECC = 0x0b << 4, /* Two/more bits ECC error */ INT_FIELD_EN = 0x01 << 8, /* Enable field interrupt */ INT_VSYNC_EN = 0x01 << 9, /* Enable vsync interrupt */ INT_HSYNC_EN = 0x01 << 10, /* Enable hsync interrupt */ INT_VSTART_EN = 0x01 << 11, /* Enable vstart interrupt */ INT_DMA_END_EN = 0x01 << 12, /* Enable DMA end interrupt */ INT_ERROR_EN = 0x01 << 13, /* Enable error interrupt */ INT_ECC_EN = 0x01 << 14, /* Enable ECC interrupt */ INT_FIELD_STATUS = 0x01 << 16, /* field interrupt status */ INT_VSYNC_STATUS = 0x01 << 17, /* vsync interrupt status */ INT_HSYNC_STATUS = 0x01 << 18, /* hsync interrupt status */ INT_VSTART_STATUS = 0x01 << 19, /* vstart interrupt status */ INT_DMA_END_STATUS = 0x01 << 20, /* DMA end interrupt status */ INT_ERROR_STATUS = 0x01 << 21, /* error interrupt status */ DMA_ACT = 0x01 << 27, /* Enable DMA transfer */ FIELD_NO = 0x01 << 28, /* Field number */ DITHER_ON = 0x01 << 29, /* Dithering is on */ ROUND_ON = 0x01 << 30, /* Round is on */ MODE_32BIT = 0x01 << 31, /* Data in RGBa888, * 0 in RGB565 */ }; #define norm_maxw() 720 #define norm_maxh() 576 #define INT_ALL_STATUS (INT_FIELD_STATUS | INT_VSYNC_STATUS | \ INT_HSYNC_STATUS | INT_VSTART_STATUS | \ INT_DMA_END_STATUS | INT_ERROR_STATUS) 
#define NUM_FORMATS ARRAY_SIZE(formats) static irqreturn_t viu_intr(int irq, void *dev_id); struct viu_fmt *format_by_fourcc(int fourcc) { int i; for (i = 0; i < NUM_FORMATS; i++) { if (formats[i].pixelformat == fourcc) return formats + i; } dprintk(0, "unknown pixelformat:'%4.4s'\n", (char *)&fourcc); return NULL; } void viu_start_dma(struct viu_dev *dev) { struct viu_reg *vr = dev->vr; dev->field = 0; /* Enable DMA operation */ out_be32(&vr->status_cfg, SOFT_RST); out_be32(&vr->status_cfg, INT_FIELD_EN); } void viu_stop_dma(struct viu_dev *dev) { struct viu_reg *vr = dev->vr; int cnt = 100; u32 status_cfg; out_be32(&vr->status_cfg, 0); /* Clear pending interrupts */ status_cfg = in_be32(&vr->status_cfg); if (status_cfg & 0x3f0000) out_be32(&vr->status_cfg, status_cfg & 0x3f0000); if (status_cfg & DMA_ACT) { do { status_cfg = in_be32(&vr->status_cfg); if (status_cfg & INT_DMA_END_STATUS) break; } while (cnt--); if (cnt < 0) { /* timed out, issue soft reset */ out_be32(&vr->status_cfg, SOFT_RST); out_be32(&vr->status_cfg, 0); } else { /* clear DMA_END and other pending irqs */ out_be32(&vr->status_cfg, status_cfg & 0x3f0000); } } dev->field = 0; } static int restart_video_queue(struct viu_dmaqueue *vidq) { struct viu_buf *buf, *prev; dprintk(1, "%s vidq=0x%08lx\n", __func__, (unsigned long)vidq); if (!list_empty(&vidq->active)) { buf = list_entry(vidq->active.next, struct viu_buf, vb.queue); dprintk(2, "restart_queue [%p/%d]: restart dma\n", buf, buf->vb.i); viu_stop_dma(vidq->dev); /* cancel all outstanding capture requests */ list_for_each_entry_safe(buf, prev, &vidq->active, vb.queue) { list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); } mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT); return 0; } prev = NULL; for (;;) { if (list_empty(&vidq->queued)) return 0; buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue); if (prev == NULL) { list_move_tail(&buf->vb.queue, &vidq->active); dprintk(1, "Restarting video dma\n"); 
viu_stop_dma(vidq->dev); viu_start_dma(vidq->dev); buf->vb.state = VIDEOBUF_ACTIVE; mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2, "[%p/%d] restart_queue - first active\n", buf, buf->vb.i); } else if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_move_tail(&buf->vb.queue, &vidq->active); buf->vb.state = VIDEOBUF_ACTIVE; dprintk(2, "[%p/%d] restart_queue - move to active\n", buf, buf->vb.i); } else { return 0; } prev = buf; } } static void viu_vid_timeout(unsigned long data) { struct viu_dev *dev = (struct viu_dev *)data; struct viu_buf *buf; struct viu_dmaqueue *vidq = &dev->vidq; while (!list_empty(&vidq->active)) { buf = list_entry(vidq->active.next, struct viu_buf, vb.queue); list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); dprintk(1, "viu/0: [%p/%d] timeout\n", buf, buf->vb.i); } restart_video_queue(vidq); } /* * Videobuf operations */ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct viu_fh *fh = vq->priv_data; *size = fh->width * fh->height * fh->fmt->depth >> 3; if (*count == 0) *count = 32; while (*size * *count > VIU_VID_MEM_LIMIT * 1024 * 1024) (*count)--; dprintk(1, "%s, count=%d, size=%d\n", __func__, *count, *size); return 0; } static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf) { struct videobuf_buffer *vb = &buf->vb; void *vaddr = NULL; BUG_ON(in_interrupt()); videobuf_waiton(vq, &buf->vb, 0, 0); if (vq->int_ops && vq->int_ops->vaddr) vaddr = vq->int_ops->vaddr(vb); if (vaddr) videobuf_dma_contig_free(vq, &buf->vb); buf->vb.state = VIDEOBUF_NEEDS_INIT; } inline int buffer_activate(struct viu_dev *dev, struct viu_buf *buf) { struct viu_reg *vr = dev->vr; int bpp; /* setup the DMA base address */ reg_val.field_base_addr = videobuf_to_dma_contig(&buf->vb); dprintk(1, "buffer_activate [%p/%d]: dma addr 0x%lx\n", buf, buf->vb.i, (unsigned long)reg_val.field_base_addr); /* interlace 
is on by default, set horizontal DMA increment */ reg_val.status_cfg = 0; bpp = buf->fmt->depth >> 3; switch (bpp) { case 2: reg_val.status_cfg &= ~MODE_32BIT; reg_val.dma_inc = buf->vb.width * 2; break; case 4: reg_val.status_cfg |= MODE_32BIT; reg_val.dma_inc = buf->vb.width * 4; break; default: dprintk(0, "doesn't support color depth(%d)\n", bpp * 8); return -EINVAL; } /* setup picture_count register */ reg_val.picture_count = (buf->vb.height / 2) << 16 | buf->vb.width; reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN; buf->vb.state = VIDEOBUF_ACTIVE; dev->capfield = buf->vb.field; /* reset dma increment if needed */ if (!V4L2_FIELD_HAS_BOTH(buf->vb.field)) reg_val.dma_inc = 0; out_be32(&vr->dma_inc, reg_val.dma_inc); out_be32(&vr->picture_count, reg_val.picture_count); out_be32(&vr->field_base_addr, reg_val.field_base_addr); mod_timer(&dev->vidq.timeout, jiffies + BUFFER_TIMEOUT); return 0; } static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct viu_fh *fh = vq->priv_data; struct viu_buf *buf = container_of(vb, struct viu_buf, vb); int rc; BUG_ON(fh->fmt == NULL); if (fh->width < 48 || fh->width > norm_maxw() || fh->height < 32 || fh->height > norm_maxh()) return -EINVAL; buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3; if (buf->vb.baddr != 0 && buf->vb.bsize < buf->vb.size) return -EINVAL; if (buf->fmt != fh->fmt || buf->vb.width != fh->width || buf->vb.height != fh->height || buf->vb.field != field) { buf->fmt = fh->fmt; buf->vb.width = fh->width; buf->vb.height = fh->height; buf->vb.field = field; } if (buf->vb.state == VIDEOBUF_NEEDS_INIT) { rc = videobuf_iolock(vq, &buf->vb, NULL); if (rc != 0) goto fail; buf->vb.width = fh->width; buf->vb.height = fh->height; buf->vb.field = field; buf->fmt = fh->fmt; } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: free_buffer(vq, buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { 
struct viu_buf *buf = container_of(vb, struct viu_buf, vb); struct viu_fh *fh = vq->priv_data; struct viu_dev *dev = fh->dev; struct viu_dmaqueue *vidq = &dev->vidq; struct viu_buf *prev; if (!list_empty(&vidq->queued)) { dprintk(1, "adding vb queue=0x%08lx\n", (unsigned long)&buf->vb.queue); dprintk(1, "vidq pointer 0x%p, queued 0x%p\n", vidq, &vidq->queued); dprintk(1, "dev %p, queued: self %p, next %p, head %p\n", dev, &vidq->queued, vidq->queued.next, vidq->queued.prev); list_add_tail(&buf->vb.queue, &vidq->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf, buf->vb.i); } else if (list_empty(&vidq->active)) { dprintk(1, "adding vb active=0x%08lx\n", (unsigned long)&buf->vb.queue); list_add_tail(&buf->vb.queue, &vidq->active); buf->vb.state = VIDEOBUF_ACTIVE; mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2, "[%p/%d] buffer_queue - first active\n", buf, buf->vb.i); buffer_activate(dev, buf); } else { dprintk(1, "adding vb queue2=0x%08lx\n", (unsigned long)&buf->vb.queue); prev = list_entry(vidq->active.prev, struct viu_buf, vb.queue); if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_add_tail(&buf->vb.queue, &vidq->active); buf->vb.state = VIDEOBUF_ACTIVE; dprintk(2, "[%p/%d] buffer_queue - append to active\n", buf, buf->vb.i); } else { list_add_tail(&buf->vb.queue, &vidq->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf, buf->vb.i); } } } static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct viu_buf *buf = container_of(vb, struct viu_buf, vb); struct viu_fh *fh = vq->priv_data; struct viu_dev *dev = (struct viu_dev *)fh->dev; viu_stop_dma(dev); free_buffer(vq, buf); } static struct videobuf_queue_ops viu_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /* * IOCTL vidioc 
handling */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strcpy(cap->driver, "viu"); strcpy(cap->card, "viu"); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_READWRITE; return 0; } static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { int index = f->index; if (f->index > NUM_FORMATS) return -EINVAL; strlcpy(f->description, formats[index].name, sizeof(f->description)); f->pixelformat = formats[index].fourcc; return 0; } static int vidioc_g_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct viu_fh *fh = priv; f->fmt.pix.width = fh->width; f->fmt.pix.height = fh->height; f->fmt.pix.field = fh->vb_vidq.field; f->fmt.pix.pixelformat = fh->fmt->pixelformat; f->fmt.pix.bytesperline = (f->fmt.pix.width * fh->fmt->depth) >> 3; f->fmt.pix.sizeimage = fh->sizeimage; return 0; } static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct viu_fmt *fmt; enum v4l2_field field; unsigned int maxw, maxh; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (!fmt) { dprintk(1, "Fourcc format (0x%08x) invalid.", f->fmt.pix.pixelformat); return -EINVAL; } field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) { field = V4L2_FIELD_INTERLACED; } else if (field != V4L2_FIELD_INTERLACED) { dprintk(1, "Field type invalid.\n"); return -EINVAL; } maxw = norm_maxw(); maxh = norm_maxh(); f->fmt.pix.field = field; if (f->fmt.pix.height < 32) f->fmt.pix.height = 32; if (f->fmt.pix.height > maxh) f->fmt.pix.height = maxh; if (f->fmt.pix.width < 48) f->fmt.pix.width = 48; if (f->fmt.pix.width > maxw) f->fmt.pix.width = maxw; f->fmt.pix.width &= ~0x03; f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; return 0; } static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct viu_fh *fh = priv; int ret; ret = vidioc_try_fmt_cap(file, fh, f); if (ret < 0) return ret; fh->fmt = 
format_by_fourcc(f->fmt.pix.pixelformat); fh->width = f->fmt.pix.width; fh->height = f->fmt.pix.height; fh->sizeimage = f->fmt.pix.sizeimage; fh->vb_vidq.field = f->fmt.pix.field; fh->type = f->type; dprintk(1, "set to pixelformat '%4.6s'\n", (char *)&fh->fmt->name); return 0; } static int vidioc_g_fmt_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct viu_fh *fh = priv; f->fmt.win = fh->win; return 0; } static int verify_preview(struct viu_dev *dev, struct v4l2_window *win) { enum v4l2_field field; int maxw, maxh; if (dev->ovbuf.base == NULL) return -EINVAL; if (dev->ovfmt == NULL) return -EINVAL; if (win->w.width < 48 || win->w.height < 32) return -EINVAL; field = win->field; maxw = dev->crop_current.width; maxh = dev->crop_current.height; if (field == V4L2_FIELD_ANY) { field = (win->w.height > maxh/2) ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; } switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: maxh = maxh / 2; break; case V4L2_FIELD_INTERLACED: break; default: return -EINVAL; } win->field = field; if (win->w.width > maxw) win->w.width = maxw; if (win->w.height > maxh) win->w.height = maxh; return 0; } inline void viu_activate_overlay(struct viu_reg *viu_reg) { struct viu_reg *vr = viu_reg; out_be32(&vr->field_base_addr, reg_val.field_base_addr); out_be32(&vr->dma_inc, reg_val.dma_inc); out_be32(&vr->picture_count, reg_val.picture_count); } static int viu_setup_preview(struct viu_dev *dev, struct viu_fh *fh) { int bpp; dprintk(1, "%s %dx%d %s\n", __func__, fh->win.w.width, fh->win.w.height, dev->ovfmt->name); reg_val.status_cfg = 0; /* setup window */ reg_val.picture_count = (fh->win.w.height / 2) << 16 | fh->win.w.width; /* setup color depth and dma increment */ bpp = dev->ovfmt->depth / 8; switch (bpp) { case 2: reg_val.status_cfg &= ~MODE_32BIT; reg_val.dma_inc = fh->win.w.width * 2; break; case 4: reg_val.status_cfg |= MODE_32BIT; reg_val.dma_inc = fh->win.w.width * 4; break; default: dprintk(0, "device doesn't support color 
depth(%d)\n", bpp * 8); return -EINVAL; } dev->ovfield = fh->win.field; if (!V4L2_FIELD_HAS_BOTH(dev->ovfield)) reg_val.dma_inc = 0; reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN; /* setup the base address of the overlay buffer */ reg_val.field_base_addr = (u32)dev->ovbuf.base; return 0; } static int vidioc_s_fmt_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct viu_fh *fh = priv; struct viu_dev *dev = (struct viu_dev *)fh->dev; unsigned long flags; int err; err = verify_preview(dev, &f->fmt.win); if (err) return err; fh->win = f->fmt.win; spin_lock_irqsave(&dev->slock, flags); viu_setup_preview(dev, fh); spin_unlock_irqrestore(&dev->slock, flags); return 0; } static int vidioc_try_fmt_overlay(struct file *file, void *priv, struct v4l2_format *f) { return 0; } static int vidioc_overlay(struct file *file, void *priv, unsigned int on) { struct viu_fh *fh = priv; struct viu_dev *dev = (struct viu_dev *)fh->dev; unsigned long flags; if (on) { spin_lock_irqsave(&dev->slock, flags); viu_activate_overlay(dev->vr); dev->ovenable = 1; /* start dma */ viu_start_dma(dev); spin_unlock_irqrestore(&dev->slock, flags); } else { viu_stop_dma(dev); dev->ovenable = 0; } return 0; } int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *arg) { struct viu_fh *fh = priv; struct viu_dev *dev = fh->dev; struct v4l2_framebuffer *fb = arg; *fb = dev->ovbuf; fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; return 0; } int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *arg) { struct viu_fh *fh = priv; struct viu_dev *dev = fh->dev; const struct v4l2_framebuffer *fb = arg; struct viu_fmt *fmt; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) return -EPERM; /* check args */ fmt = format_by_fourcc(fb->fmt.pixelformat); if (fmt == NULL) return -EINVAL; /* ok, accept it */ dev->ovbuf = *fb; dev->ovfmt = fmt; if (dev->ovbuf.fmt.bytesperline == 0) { dev->ovbuf.fmt.bytesperline = dev->ovbuf.fmt.width * 
fmt->depth / 8; } return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct viu_fh *fh = priv; return videobuf_reqbufs(&fh->vb_vidq, p); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct viu_fh *fh = priv; return videobuf_querybuf(&fh->vb_vidq, p); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct viu_fh *fh = priv; return videobuf_qbuf(&fh->vb_vidq, p); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct viu_fh *fh = priv; return videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct viu_fh *fh = priv; struct viu_dev *dev = fh->dev; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (fh->type != i) return -EINVAL; if (dev->ovenable) dev->ovenable = 0; viu_start_dma(fh->dev); return videobuf_streamon(&fh->vb_vidq); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct viu_fh *fh = priv; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (fh->type != i) return -EINVAL; viu_stop_dma(fh->dev); return videobuf_streamoff(&fh->vb_vidq); } #define decoder_call(viu, o, f, args...) 
\ v4l2_subdev_call(viu->decoder, o, f, ##args) static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std_id) { struct viu_fh *fh = priv; decoder_call(fh->dev, video, querystd, std_id); return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id) { struct viu_fh *fh = priv; fh->dev->std = id; decoder_call(fh->dev, core, s_std, id); return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std_id) { struct viu_fh *fh = priv; *std_id = fh->dev->std; return 0; } /* only one input in this driver */ static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct viu_fh *fh = priv; if (inp->index != 0) return -EINVAL; inp->type = V4L2_INPUT_TYPE_CAMERA; inp->std = fh->dev->vdev->tvnorms; strcpy(inp->name, "Camera"); return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct viu_fh *fh = priv; if (i > 1) return -EINVAL; decoder_call(fh->dev, video, s_routing, i, 0, 0); return 0; } /* Controls */ static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) { if (qc->id && qc->id == viu_qctrl[i].id) { memcpy(qc, &(viu_qctrl[i]), sizeof(*qc)); return 0; } } return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { int i; for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) { if (ctrl->id == viu_qctrl[i].id) { ctrl->value = qctl_regs[i]; return 0; } } return -EINVAL; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { int i; for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) { if (ctrl->id == viu_qctrl[i].id) { if (ctrl->value < viu_qctrl[i].minimum || ctrl->value > viu_qctrl[i].maximum) return -ERANGE; qctl_regs[i] = ctrl->value; return 0; } } return -EINVAL; } inline void viu_activate_next_buf(struct viu_dev *dev, 
struct viu_dmaqueue *viuq) { struct viu_dmaqueue *vidq = viuq; struct viu_buf *buf; /* launch another DMA operation for an active/queued buffer */ if (!list_empty(&vidq->active)) { buf = list_entry(vidq->active.next, struct viu_buf, vb.queue); dprintk(1, "start another queued buffer: 0x%p\n", buf); buffer_activate(dev, buf); } else if (!list_empty(&vidq->queued)) { buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue); list_del(&buf->vb.queue); dprintk(1, "start another queued buffer: 0x%p\n", buf); list_add_tail(&buf->vb.queue, &vidq->active); buf->vb.state = VIDEOBUF_ACTIVE; buffer_activate(dev, buf); } } inline void viu_default_settings(struct viu_reg *viu_reg) { struct viu_reg *vr = viu_reg; out_be32(&vr->luminance, 0x9512A254); out_be32(&vr->chroma_r, 0x03310000); out_be32(&vr->chroma_g, 0x06600F38); out_be32(&vr->chroma_b, 0x00000409); out_be32(&vr->alpha, 0x000000ff); out_be32(&vr->req_alarm, 0x00000090); dprintk(1, "status reg: 0x%08x, field base: 0x%08x\n", in_be32(&vr->status_cfg), in_be32(&vr->field_base_addr)); } static void viu_overlay_intr(struct viu_dev *dev, u32 status) { struct viu_reg *vr = dev->vr; if (status & INT_DMA_END_STATUS) dev->dma_done = 1; if (status & INT_FIELD_STATUS) { if (dev->dma_done) { u32 addr = reg_val.field_base_addr; dev->dma_done = 0; if (status & FIELD_NO) addr += reg_val.dma_inc; out_be32(&vr->field_base_addr, addr); out_be32(&vr->dma_inc, reg_val.dma_inc); out_be32(&vr->status_cfg, (status & 0xffc0ffff) | (status & INT_ALL_STATUS) | reg_val.status_cfg); } else if (status & INT_VSYNC_STATUS) { out_be32(&vr->status_cfg, (status & 0xffc0ffff) | (status & INT_ALL_STATUS) | reg_val.status_cfg); } } } static void viu_capture_intr(struct viu_dev *dev, u32 status) { struct viu_dmaqueue *vidq = &dev->vidq; struct viu_reg *vr = dev->vr; struct viu_buf *buf; int field_num; int need_two; int dma_done = 0; field_num = status & FIELD_NO; need_two = V4L2_FIELD_HAS_BOTH(dev->capfield); if (status & INT_DMA_END_STATUS) { dma_done 
= 1; if (((field_num == 0) && (dev->field == 0)) || (field_num && (dev->field == 1))) dev->field++; } if (status & INT_FIELD_STATUS) { dprintk(1, "irq: field %d, done %d\n", !!field_num, dma_done); if (unlikely(dev->first)) { if (field_num == 0) { dev->first = 0; dprintk(1, "activate first buf\n"); viu_activate_next_buf(dev, vidq); } else dprintk(1, "wait field 0\n"); return; } /* setup buffer address for next dma operation */ if (!list_empty(&vidq->active)) { u32 addr = reg_val.field_base_addr; if (field_num && need_two) { addr += reg_val.dma_inc; dprintk(1, "field 1, 0x%lx, dev field %d\n", (unsigned long)addr, dev->field); } out_be32(&vr->field_base_addr, addr); out_be32(&vr->dma_inc, reg_val.dma_inc); out_be32(&vr->status_cfg, (status & 0xffc0ffff) | (status & INT_ALL_STATUS) | reg_val.status_cfg); return; } } if (dma_done && field_num && (dev->field == 2)) { dev->field = 0; buf = list_entry(vidq->active.next, struct viu_buf, vb.queue); dprintk(1, "viu/0: [%p/%d] 0x%lx/0x%lx: dma complete\n", buf, buf->vb.i, (unsigned long)videobuf_to_dma_contig(&buf->vb), (unsigned long)in_be32(&vr->field_base_addr)); if (waitqueue_active(&buf->vb.done)) { list_del(&buf->vb.queue); v4l2_get_timestamp(&buf->vb.ts); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; wake_up(&buf->vb.done); } /* activate next dma buffer */ viu_activate_next_buf(dev, vidq); } } static irqreturn_t viu_intr(int irq, void *dev_id) { struct viu_dev *dev = (struct viu_dev *)dev_id; struct viu_reg *vr = dev->vr; u32 status; u32 error; status = in_be32(&vr->status_cfg); if (status & INT_ERROR_STATUS) { dev->irqs.error_irq++; error = status & ERR_MASK; if (error) dprintk(1, "Err: error(%d), times:%d!\n", error >> 4, dev->irqs.error_irq); /* Clear interrupt error bit and error flags */ out_be32(&vr->status_cfg, (status & 0xffc0ffff) | INT_ERROR_STATUS); } if (status & INT_DMA_END_STATUS) { dev->irqs.dma_end_irq++; dev->dma_done = 1; dprintk(2, "VIU DMA end interrupt times: %d\n", dev->irqs.dma_end_irq); 
} if (status & INT_HSYNC_STATUS) dev->irqs.hsync_irq++; if (status & INT_FIELD_STATUS) { dev->irqs.field_irq++; dprintk(2, "VIU field interrupt times: %d\n", dev->irqs.field_irq); } if (status & INT_VSTART_STATUS) dev->irqs.vstart_irq++; if (status & INT_VSYNC_STATUS) { dev->irqs.vsync_irq++; dprintk(2, "VIU vsync interrupt times: %d\n", dev->irqs.vsync_irq); } /* clear all pending irqs */ status = in_be32(&vr->status_cfg); out_be32(&vr->status_cfg, (status & 0xffc0ffff) | (status & INT_ALL_STATUS)); if (dev->ovenable) { viu_overlay_intr(dev, status); return IRQ_HANDLED; } /* Capture mode */ viu_capture_intr(dev, status); return IRQ_HANDLED; } /* * File operations for the device */ static int viu_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct viu_dev *dev = video_get_drvdata(vdev); struct viu_fh *fh; struct viu_reg *vr; int minor = vdev->minor; u32 status_cfg; int i; dprintk(1, "viu: open (minor=%d)\n", minor); dev->users++; if (dev->users > 1) { dev->users--; return -EBUSY; } vr = dev->vr; dprintk(1, "open minor=%d type=%s users=%d\n", minor, v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users); if (mutex_lock_interruptible(&dev->lock)) { dev->users--; return -ERESTARTSYS; } /* allocate and initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (!fh) { dev->users--; mutex_unlock(&dev->lock); return -ENOMEM; } file->private_data = fh; fh->dev = dev; fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fh->fmt = format_by_fourcc(V4L2_PIX_FMT_RGB32); fh->width = norm_maxw(); fh->height = norm_maxh(); dev->crop_current.width = fh->width; dev->crop_current.height = fh->height; /* Put all controls at a sane state */ for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) qctl_regs[i] = viu_qctrl[i].default_value; dprintk(1, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n", (unsigned long)fh, (unsigned long)dev, (unsigned long)&dev->vidq); dprintk(1, "Open: list_empty queued=%d\n", list_empty(&dev->vidq.queued)); dprintk(1, "Open: 
list_empty active=%d\n", list_empty(&dev->vidq.active)); viu_default_settings(vr); status_cfg = in_be32(&vr->status_cfg); out_be32(&vr->status_cfg, status_cfg & ~(INT_VSYNC_EN | INT_HSYNC_EN | INT_FIELD_EN | INT_VSTART_EN | INT_DMA_END_EN | INT_ERROR_EN | INT_ECC_EN)); status_cfg = in_be32(&vr->status_cfg); out_be32(&vr->status_cfg, status_cfg | INT_ALL_STATUS); spin_lock_init(&fh->vbq_lock); videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops, dev->dev, &fh->vbq_lock, fh->type, V4L2_FIELD_INTERLACED, sizeof(struct viu_buf), fh, &fh->dev->lock); mutex_unlock(&dev->lock); return 0; } static ssize_t viu_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct viu_fh *fh = file->private_data; struct viu_dev *dev = fh->dev; int ret = 0; dprintk(2, "%s\n", __func__); if (dev->ovenable) dev->ovenable = 0; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (mutex_lock_interruptible(&dev->lock)) return -ERESTARTSYS; viu_start_dma(dev); ret = videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0, file->f_flags & O_NONBLOCK); mutex_unlock(&dev->lock); return ret; } return 0; } static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait) { struct viu_fh *fh = file->private_data; struct videobuf_queue *q = &fh->vb_vidq; struct viu_dev *dev = fh->dev; unsigned int res; if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) return POLLERR; mutex_lock(&dev->lock); res = videobuf_poll_stream(file, q, wait); mutex_unlock(&dev->lock); return res; } static int viu_release(struct file *file) { struct viu_fh *fh = file->private_data; struct viu_dev *dev = fh->dev; int minor = video_devdata(file)->minor; mutex_lock(&dev->lock); viu_stop_dma(dev); videobuf_stop(&fh->vb_vidq); videobuf_mmap_free(&fh->vb_vidq); mutex_unlock(&dev->lock); kfree(fh); dev->users--; dprintk(1, "close (minor=%d, users=%d)\n", minor, dev->users); return 0; } void viu_reset(struct viu_reg *reg) { out_be32(&reg->status_cfg, 0); out_be32(&reg->luminance, 0x9512a254); 
out_be32(&reg->chroma_r, 0x03310000); out_be32(&reg->chroma_g, 0x06600f38); out_be32(&reg->chroma_b, 0x00000409); out_be32(&reg->field_base_addr, 0); out_be32(&reg->dma_inc, 0); out_be32(&reg->picture_count, 0x01e002d0); out_be32(&reg->req_alarm, 0x00000090); out_be32(&reg->alpha, 0x000000ff); } static int viu_mmap(struct file *file, struct vm_area_struct *vma) { struct viu_fh *fh = file->private_data; struct viu_dev *dev = fh->dev; int ret; dprintk(1, "mmap called, vma=0x%08lx\n", (unsigned long)vma); if (mutex_lock_interruptible(&dev->lock)) return -ERESTARTSYS; ret = videobuf_mmap_mapper(&fh->vb_vidq, vma); mutex_unlock(&dev->lock); dprintk(1, "vma start=0x%08lx, size=%ld, ret=%d\n", (unsigned long)vma->vm_start, (unsigned long)vma->vm_end-(unsigned long)vma->vm_start, ret); return ret; } static struct v4l2_file_operations viu_fops = { .owner = THIS_MODULE, .open = viu_open, .release = viu_release, .read = viu_read, .poll = viu_poll, .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ .mmap = viu_mmap, }; static const struct v4l2_ioctl_ops viu_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_cap, .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt, .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_overlay, .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_overlay, .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_overlay, .vidioc_overlay = vidioc_overlay, .vidioc_g_fbuf = vidioc_g_fbuf, .vidioc_s_fbuf = vidioc_s_fbuf, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_querystd = vidioc_querystd, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, 
.vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, }; static struct video_device viu_template = { .name = "FSL viu", .fops = &viu_fops, .minor = -1, .ioctl_ops = &viu_ioctl_ops, .release = video_device_release, .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL, .current_norm = V4L2_STD_NTSC_M, }; static int viu_of_probe(struct platform_device *op) { struct viu_dev *viu_dev; struct video_device *vdev; struct resource r; struct viu_reg __iomem *viu_regs; struct i2c_adapter *ad; int ret, viu_irq; ret = of_address_to_resource(op->dev.of_node, 0, &r); if (ret) { dev_err(&op->dev, "Can't parse device node resource\n"); return -ENODEV; } viu_irq = irq_of_parse_and_map(op->dev.of_node, 0); if (viu_irq == NO_IRQ) { dev_err(&op->dev, "Error while mapping the irq\n"); return -EINVAL; } /* request mem region */ if (!devm_request_mem_region(&op->dev, r.start, sizeof(struct viu_reg), DRV_NAME)) { dev_err(&op->dev, "Error while requesting mem region\n"); ret = -EBUSY; goto err; } /* remap registers */ viu_regs = devm_ioremap(&op->dev, r.start, sizeof(struct viu_reg)); if (!viu_regs) { dev_err(&op->dev, "Can't map register set\n"); ret = -ENOMEM; goto err; } /* Prepare our private structure */ viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_ATOMIC); if (!viu_dev) { dev_err(&op->dev, "Can't allocate private structure\n"); ret = -ENOMEM; goto err; } viu_dev->vr = viu_regs; viu_dev->irq = viu_irq; viu_dev->dev = &op->dev; /* init video dma queues */ INIT_LIST_HEAD(&viu_dev->vidq.active); INIT_LIST_HEAD(&viu_dev->vidq.queued); snprintf(viu_dev->v4l2_dev.name, sizeof(viu_dev->v4l2_dev.name), "%s", "VIU"); ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev); if (ret < 0) { dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret); goto err; } ad = i2c_get_adapter(0); viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad, "saa7113", VIU_VIDEO_DECODER_ADDR, NULL); viu_dev->vidq.timeout.function = 
viu_vid_timeout; viu_dev->vidq.timeout.data = (unsigned long)viu_dev; init_timer(&viu_dev->vidq.timeout); viu_dev->first = 1; /* Allocate memory for video device */ vdev = video_device_alloc(); if (vdev == NULL) { ret = -ENOMEM; goto err_vdev; } memcpy(vdev, &viu_template, sizeof(viu_template)); vdev->v4l2_dev = &viu_dev->v4l2_dev; viu_dev->vdev = vdev; /* initialize locks */ mutex_init(&viu_dev->lock); viu_dev->vdev->lock = &viu_dev->lock; spin_lock_init(&viu_dev->slock); video_set_drvdata(viu_dev->vdev, viu_dev); mutex_lock(&viu_dev->lock); ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1); if (ret < 0) { video_device_release(viu_dev->vdev); goto err_vdev; } /* enable VIU clock */ viu_dev->clk = clk_get(&op->dev, "viu_clk"); if (IS_ERR(viu_dev->clk)) { dev_err(&op->dev, "failed to find the clock module!\n"); ret = -ENODEV; goto err_clk; } else { clk_enable(viu_dev->clk); } /* reset VIU module */ viu_reset(viu_dev->vr); /* install interrupt handler */ if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) { dev_err(&op->dev, "Request VIU IRQ failed.\n"); ret = -ENODEV; goto err_irq; } mutex_unlock(&viu_dev->lock); dev_info(&op->dev, "Freescale VIU Video Capture Board\n"); return ret; err_irq: clk_disable(viu_dev->clk); clk_put(viu_dev->clk); err_clk: video_unregister_device(viu_dev->vdev); err_vdev: mutex_unlock(&viu_dev->lock); i2c_put_adapter(ad); v4l2_device_unregister(&viu_dev->v4l2_dev); err: irq_dispose_mapping(viu_irq); return ret; } static int viu_of_remove(struct platform_device *op) { struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev); struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev); struct v4l2_subdev *sdev = list_entry(v4l2_dev->subdevs.next, struct v4l2_subdev, list); struct i2c_client *client = v4l2_get_subdevdata(sdev); free_irq(dev->irq, (void *)dev); irq_dispose_mapping(dev->irq); clk_disable(dev->clk); clk_put(dev->clk); video_unregister_device(dev->vdev); i2c_put_adapter(client->adapter); 
v4l2_device_unregister(&dev->v4l2_dev); return 0; } #ifdef CONFIG_PM static int viu_suspend(struct platform_device *op, pm_message_t state) { struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev); struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev); clk_disable(dev->clk); return 0; } static int viu_resume(struct platform_device *op) { struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev); struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev); clk_enable(dev->clk); return 0; } #endif /* * Initialization and module stuff */ static struct of_device_id mpc512x_viu_of_match[] = { { .compatible = "fsl,mpc5121-viu", }, {}, }; MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match); static struct platform_driver viu_of_platform_driver = { .probe = viu_of_probe, .remove = viu_of_remove, #ifdef CONFIG_PM .suspend = viu_suspend, .resume = viu_resume, #endif .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = mpc512x_viu_of_match, }, }; module_platform_driver(viu_of_platform_driver); MODULE_DESCRIPTION("Freescale Video-In(VIU)"); MODULE_AUTHOR("Hongjun Chen"); MODULE_LICENSE("GPL"); MODULE_VERSION(VIU_VERSION);
gpl-2.0
alma-siwon/ALMA-Kernel-AOSP
arch/arm/mach-msm/board-sapphire.c
3633
2842
/* linux/arch/arm/mach-msm/board-sapphire.c * Copyright (C) 2007-2009 HTC Corporation. * Author: Thomas Tsai <thomas_tsai@htc.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/device.h> #include <linux/delay.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> #include <mach/system.h> #include <mach/vreg.h> #include <mach/board.h> #include <mach/proc_comm.h> #include <asm/io.h> #include <asm/delay.h> #include <asm/setup.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include "gpio_chip.h" #include "board-sapphire.h" #include "devices.h" void msm_init_irq(void); void msm_init_gpio(void); static struct platform_device *devices[] __initdata = { &msm_device_smd, &msm_device_dmov, &msm_device_nand, &msm_device_uart1, &msm_device_uart3, }; extern struct sys_timer msm_timer; static void __init sapphire_init_irq(void) { msm_init_irq(); } static void __init sapphire_init(void) { platform_add_devices(devices, ARRAY_SIZE(devices)); } static struct map_desc sapphire_io_desc[] __initdata = { { .virtual = SAPPHIRE_CPLD_BASE, .pfn = __phys_to_pfn(SAPPHIRE_CPLD_START), .length = SAPPHIRE_CPLD_SIZE, .type = MT_DEVICE_NONSHARED } }; static void __init sapphire_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) { int smi_sz = parse_tag_smi((const struct tag *)tags); 
mi->nr_banks = 1; mi->bank[0].start = PHYS_OFFSET; mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); if (smi_sz == 32) { mi->bank[0].size = (84*1024*1024); } else if (smi_sz == 64) { mi->bank[0].size = (101*1024*1024); } else { /* Give a default value when not get smi size */ smi_sz = 64; mi->bank[0].size = (101*1024*1024); } } static void __init sapphire_map_io(void) { msm_map_common_io(); iotable_init(sapphire_io_desc, ARRAY_SIZE(sapphire_io_desc)); msm_clock_init(); } MACHINE_START(SAPPHIRE, "sapphire") /* Maintainer: Brian Swetland <swetland@google.com> */ .atag_offset = 0x100, .fixup = sapphire_fixup, .map_io = sapphire_map_io, .init_irq = sapphire_init_irq, .init_machine = sapphire_init, .timer = &msm_timer, MACHINE_END
gpl-2.0
gunine/boeffla-kernel-jb-lte
drivers/input/misc/uinput.c
7473
19714
/* * User level driver support for input subsystem * * Heavily based on evdev.c by Vojtech Pavlik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> * * Changes/Revisions: * 0.3 09/04/2006 (Anssi Hannula <anssi.hannula@gmail.com>) * - updated ff support for the changes in kernel interface * - added MODULE_VERSION * 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>) * - added force feedback support * - added UI_SET_PHYS * 0.1 20/06/2002 * - first public version */ #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uinput.h> #include <linux/input/mt.h> #include "../input-compat.h" static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct uinput_device *udev = input_get_drvdata(dev); udev->buff[udev->head].type = type; udev->buff[udev->head].code = code; udev->buff[udev->head].value = value; do_gettimeofday(&udev->buff[udev->head].time); udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; wake_up_interruptible(&udev->waitq); return 0; } /* Atomically allocate an ID for the given request. Returns 0 on success. 
*/ static int uinput_request_alloc_id(struct uinput_device *udev, struct uinput_request *request) { int id; int err = -1; spin_lock(&udev->requests_lock); for (id = 0; id < UINPUT_NUM_REQUESTS; id++) { if (!udev->requests[id]) { request->id = id; udev->requests[id] = request; err = 0; break; } } spin_unlock(&udev->requests_lock); return err; } static struct uinput_request *uinput_request_find(struct uinput_device *udev, int id) { /* Find an input request, by ID. Returns NULL if the ID isn't valid. */ if (id >= UINPUT_NUM_REQUESTS || id < 0) return NULL; return udev->requests[id]; } static inline int uinput_request_reserve_slot(struct uinput_device *udev, struct uinput_request *request) { /* Allocate slot. If none are available right away, wait. */ return wait_event_interruptible(udev->requests_waitq, !uinput_request_alloc_id(udev, request)); } static void uinput_request_done(struct uinput_device *udev, struct uinput_request *request) { /* Mark slot as available */ udev->requests[request->id] = NULL; wake_up(&udev->requests_waitq); complete(&request->done); } static int uinput_request_submit(struct uinput_device *udev, struct uinput_request *request) { int retval; retval = uinput_request_reserve_slot(udev, request); if (retval) return retval; retval = mutex_lock_interruptible(&udev->mutex); if (retval) return retval; if (udev->state != UIST_CREATED) { retval = -ENODEV; goto out; } /* Tell our userspace app about this new request by queueing an input event */ uinput_dev_event(udev->dev, EV_UINPUT, request->code, request->id); out: mutex_unlock(&udev->mutex); return retval; } /* * Fail all ouitstanding requests so handlers don't wait for the userspace * to finish processing them. 
*/ static void uinput_flush_requests(struct uinput_device *udev) { struct uinput_request *request; int i; spin_lock(&udev->requests_lock); for (i = 0; i < UINPUT_NUM_REQUESTS; i++) { request = udev->requests[i]; if (request) { request->retval = -ENODEV; uinput_request_done(udev, request); } } spin_unlock(&udev->requests_lock); } static void uinput_dev_set_gain(struct input_dev *dev, u16 gain) { uinput_dev_event(dev, EV_FF, FF_GAIN, gain); } static void uinput_dev_set_autocenter(struct input_dev *dev, u16 magnitude) { uinput_dev_event(dev, EV_FF, FF_AUTOCENTER, magnitude); } static int uinput_dev_playback(struct input_dev *dev, int effect_id, int value) { return uinput_dev_event(dev, EV_FF, effect_id, value); } static int uinput_dev_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct uinput_device *udev = input_get_drvdata(dev); struct uinput_request request; int retval; /* * uinput driver does not currently support periodic effects with * custom waveform since it does not have a way to pass buffer of * samples (custom_data) to userspace. If ever there is a device * supporting custom waveforms we would need to define an additional * ioctl (UI_UPLOAD_SAMPLES) but for now we just bail out. 
*/ if (effect->type == FF_PERIODIC && effect->u.periodic.waveform == FF_CUSTOM) return -EINVAL; request.id = -1; init_completion(&request.done); request.code = UI_FF_UPLOAD; request.u.upload.effect = effect; request.u.upload.old = old; retval = uinput_request_submit(udev, &request); if (!retval) { wait_for_completion(&request.done); retval = request.retval; } return retval; } static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) { struct uinput_device *udev = input_get_drvdata(dev); struct uinput_request request; int retval; if (!test_bit(EV_FF, dev->evbit)) return -ENOSYS; request.id = -1; init_completion(&request.done); request.code = UI_FF_ERASE; request.u.effect_id = effect_id; retval = uinput_request_submit(udev, &request); if (!retval) { wait_for_completion(&request.done); retval = request.retval; } return retval; } static void uinput_destroy_device(struct uinput_device *udev) { const char *name, *phys; struct input_dev *dev = udev->dev; enum uinput_state old_state = udev->state; udev->state = UIST_NEW_DEVICE; if (dev) { name = dev->name; phys = dev->phys; if (old_state == UIST_CREATED) { uinput_flush_requests(udev); input_unregister_device(dev); } else { input_free_device(dev); } kfree(name); kfree(phys); udev->dev = NULL; } } static int uinput_create_device(struct uinput_device *udev) { struct input_dev *dev = udev->dev; int error; if (udev->state != UIST_SETUP_COMPLETE) { printk(KERN_DEBUG "%s: write device info first\n", UINPUT_NAME); return -EINVAL; } if (udev->ff_effects_max) { error = input_ff_create(dev, udev->ff_effects_max); if (error) goto fail1; dev->ff->upload = uinput_dev_upload_effect; dev->ff->erase = uinput_dev_erase_effect; dev->ff->playback = uinput_dev_playback; dev->ff->set_gain = uinput_dev_set_gain; dev->ff->set_autocenter = uinput_dev_set_autocenter; } error = input_register_device(udev->dev); if (error) goto fail2; udev->state = UIST_CREATED; return 0; fail2: input_ff_destroy(dev); fail1: 
uinput_destroy_device(udev); return error; } static int uinput_open(struct inode *inode, struct file *file) { struct uinput_device *newdev; newdev = kzalloc(sizeof(struct uinput_device), GFP_KERNEL); if (!newdev) return -ENOMEM; mutex_init(&newdev->mutex); spin_lock_init(&newdev->requests_lock); init_waitqueue_head(&newdev->requests_waitq); init_waitqueue_head(&newdev->waitq); newdev->state = UIST_NEW_DEVICE; file->private_data = newdev; nonseekable_open(inode, file); return 0; } static int uinput_validate_absbits(struct input_dev *dev) { unsigned int cnt; int retval = 0; for (cnt = 0; cnt < ABS_CNT; cnt++) { int min, max; if (!test_bit(cnt, dev->absbit)) continue; min = input_abs_get_min(dev, cnt); max = input_abs_get_max(dev, cnt); if ((min != 0 || max != 0) && max <= min) { printk(KERN_DEBUG "%s: invalid abs[%02x] min:%d max:%d\n", UINPUT_NAME, cnt, input_abs_get_min(dev, cnt), input_abs_get_max(dev, cnt)); retval = -EINVAL; break; } if (input_abs_get_flat(dev, cnt) > input_abs_get_max(dev, cnt) - input_abs_get_min(dev, cnt)) { printk(KERN_DEBUG "%s: abs_flat #%02x out of range: %d " "(min:%d/max:%d)\n", UINPUT_NAME, cnt, input_abs_get_flat(dev, cnt), input_abs_get_min(dev, cnt), input_abs_get_max(dev, cnt)); retval = -EINVAL; break; } } return retval; } static int uinput_allocate_device(struct uinput_device *udev) { udev->dev = input_allocate_device(); if (!udev->dev) return -ENOMEM; udev->dev->event = uinput_dev_event; input_set_drvdata(udev->dev, udev); return 0; } static int uinput_setup_device(struct uinput_device *udev, const char __user *buffer, size_t count) { struct uinput_user_dev *user_dev; struct input_dev *dev; int i; int retval; if (count != sizeof(struct uinput_user_dev)) return -EINVAL; if (!udev->dev) { retval = uinput_allocate_device(udev); if (retval) return retval; } dev = udev->dev; user_dev = memdup_user(buffer, sizeof(struct uinput_user_dev)); if (IS_ERR(user_dev)) return PTR_ERR(user_dev); udev->ff_effects_max = user_dev->ff_effects_max; 
/* Ensure name is filled in */ if (!user_dev->name[0]) { retval = -EINVAL; goto exit; } kfree(dev->name); dev->name = kstrndup(user_dev->name, UINPUT_MAX_NAME_SIZE, GFP_KERNEL); if (!dev->name) { retval = -ENOMEM; goto exit; } dev->id.bustype = user_dev->id.bustype; dev->id.vendor = user_dev->id.vendor; dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); input_abs_set_flat(dev, i, user_dev->absflat[i]); } /* check if absmin/absmax/absfuzz/absflat are filled as * told in Documentation/input/input-programming.txt */ if (test_bit(EV_ABS, dev->evbit)) { retval = uinput_validate_absbits(dev); if (retval < 0) goto exit; if (test_bit(ABS_MT_SLOT, dev->absbit)) { int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; input_mt_init_slots(dev, nslot); } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { input_set_events_per_packet(dev, 60); } } udev->state = UIST_SETUP_COMPLETE; retval = count; exit: kfree(user_dev); return retval; } static inline ssize_t uinput_inject_event(struct uinput_device *udev, const char __user *buffer, size_t count) { struct input_event ev; if (count < input_event_size()) return -EINVAL; if (input_event_from_user(buffer, &ev)) return -EFAULT; input_event(udev->dev, ev.type, ev.code, ev.value); return input_event_size(); } static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct uinput_device *udev = file->private_data; int retval; retval = mutex_lock_interruptible(&udev->mutex); if (retval) return retval; retval = udev->state == UIST_CREATED ? 
uinput_inject_event(udev, buffer, count) : uinput_setup_device(udev, buffer, count); mutex_unlock(&udev->mutex); return retval; } static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct uinput_device *udev = file->private_data; int retval = 0; if (udev->state != UIST_CREATED) return -ENODEV; if (udev->head == udev->tail && (file->f_flags & O_NONBLOCK)) return -EAGAIN; retval = wait_event_interruptible(udev->waitq, udev->head != udev->tail || udev->state != UIST_CREATED); if (retval) return retval; retval = mutex_lock_interruptible(&udev->mutex); if (retval) return retval; if (udev->state != UIST_CREATED) { retval = -ENODEV; goto out; } while (udev->head != udev->tail && retval + input_event_size() <= count) { if (input_event_to_user(buffer + retval, &udev->buff[udev->tail])) { retval = -EFAULT; goto out; } udev->tail = (udev->tail + 1) % UINPUT_BUFFER_SIZE; retval += input_event_size(); } out: mutex_unlock(&udev->mutex); return retval; } static unsigned int uinput_poll(struct file *file, poll_table *wait) { struct uinput_device *udev = file->private_data; poll_wait(file, &udev->waitq, wait); if (udev->head != udev->tail) return POLLIN | POLLRDNORM; return 0; } static int uinput_release(struct inode *inode, struct file *file) { struct uinput_device *udev = file->private_data; uinput_destroy_device(udev); kfree(udev); return 0; } #ifdef CONFIG_COMPAT struct uinput_ff_upload_compat { int request_id; int retval; struct ff_effect_compat effect; struct ff_effect_compat old; }; static int uinput_ff_upload_to_user(char __user *buffer, const struct uinput_ff_upload *ff_up) { if (INPUT_COMPAT_TEST) { struct uinput_ff_upload_compat ff_up_compat; ff_up_compat.request_id = ff_up->request_id; ff_up_compat.retval = ff_up->retval; /* * It so happens that the pointer that gives us the trouble * is the last field in the structure. 
Since we don't support * custom waveforms in uinput anyway we can just copy the whole * thing (to the compat size) and ignore the pointer. */ memcpy(&ff_up_compat.effect, &ff_up->effect, sizeof(struct ff_effect_compat)); memcpy(&ff_up_compat.old, &ff_up->old, sizeof(struct ff_effect_compat)); if (copy_to_user(buffer, &ff_up_compat, sizeof(struct uinput_ff_upload_compat))) return -EFAULT; } else { if (copy_to_user(buffer, ff_up, sizeof(struct uinput_ff_upload))) return -EFAULT; } return 0; } static int uinput_ff_upload_from_user(const char __user *buffer, struct uinput_ff_upload *ff_up) { if (INPUT_COMPAT_TEST) { struct uinput_ff_upload_compat ff_up_compat; if (copy_from_user(&ff_up_compat, buffer, sizeof(struct uinput_ff_upload_compat))) return -EFAULT; ff_up->request_id = ff_up_compat.request_id; ff_up->retval = ff_up_compat.retval; memcpy(&ff_up->effect, &ff_up_compat.effect, sizeof(struct ff_effect_compat)); memcpy(&ff_up->old, &ff_up_compat.old, sizeof(struct ff_effect_compat)); } else { if (copy_from_user(ff_up, buffer, sizeof(struct uinput_ff_upload))) return -EFAULT; } return 0; } #else static int uinput_ff_upload_to_user(char __user *buffer, const struct uinput_ff_upload *ff_up) { if (copy_to_user(buffer, ff_up, sizeof(struct uinput_ff_upload))) return -EFAULT; return 0; } static int uinput_ff_upload_from_user(const char __user *buffer, struct uinput_ff_upload *ff_up) { if (copy_from_user(ff_up, buffer, sizeof(struct uinput_ff_upload))) return -EFAULT; return 0; } #endif #define uinput_set_bit(_arg, _bit, _max) \ ({ \ int __ret = 0; \ if (udev->state == UIST_CREATED) \ __ret = -EINVAL; \ else if ((_arg) > (_max)) \ __ret = -EINVAL; \ else set_bit((_arg), udev->dev->_bit); \ __ret; \ }) static long uinput_ioctl_handler(struct file *file, unsigned int cmd, unsigned long arg, void __user *p) { int retval; struct uinput_device *udev = file->private_data; struct uinput_ff_upload ff_up; struct uinput_ff_erase ff_erase; struct uinput_request *req; char *phys; 
retval = mutex_lock_interruptible(&udev->mutex); if (retval) return retval; if (!udev->dev) { retval = uinput_allocate_device(udev); if (retval) goto out; } switch (cmd) { case UI_DEV_CREATE: retval = uinput_create_device(udev); break; case UI_DEV_DESTROY: uinput_destroy_device(udev); break; case UI_SET_EVBIT: retval = uinput_set_bit(arg, evbit, EV_MAX); break; case UI_SET_KEYBIT: retval = uinput_set_bit(arg, keybit, KEY_MAX); break; case UI_SET_RELBIT: retval = uinput_set_bit(arg, relbit, REL_MAX); break; case UI_SET_ABSBIT: retval = uinput_set_bit(arg, absbit, ABS_MAX); break; case UI_SET_MSCBIT: retval = uinput_set_bit(arg, mscbit, MSC_MAX); break; case UI_SET_LEDBIT: retval = uinput_set_bit(arg, ledbit, LED_MAX); break; case UI_SET_SNDBIT: retval = uinput_set_bit(arg, sndbit, SND_MAX); break; case UI_SET_FFBIT: retval = uinput_set_bit(arg, ffbit, FF_MAX); break; case UI_SET_SWBIT: retval = uinput_set_bit(arg, swbit, SW_MAX); break; case UI_SET_PROPBIT: retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX); break; case UI_SET_PHYS: if (udev->state == UIST_CREATED) { retval = -EINVAL; goto out; } phys = strndup_user(p, 1024); if (IS_ERR(phys)) { retval = PTR_ERR(phys); goto out; } kfree(udev->dev->phys); udev->dev->phys = phys; break; case UI_BEGIN_FF_UPLOAD: retval = uinput_ff_upload_from_user(p, &ff_up); if (retval) break; req = uinput_request_find(udev, ff_up.request_id); if (!req || req->code != UI_FF_UPLOAD || !req->u.upload.effect) { retval = -EINVAL; break; } ff_up.retval = 0; ff_up.effect = *req->u.upload.effect; if (req->u.upload.old) ff_up.old = *req->u.upload.old; else memset(&ff_up.old, 0, sizeof(struct ff_effect)); retval = uinput_ff_upload_to_user(p, &ff_up); break; case UI_BEGIN_FF_ERASE: if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) { retval = -EFAULT; break; } req = uinput_request_find(udev, ff_erase.request_id); if (!req || req->code != UI_FF_ERASE) { retval = -EINVAL; break; } ff_erase.retval = 0; ff_erase.effect_id = req->u.effect_id; if 
(copy_to_user(p, &ff_erase, sizeof(ff_erase))) { retval = -EFAULT; break; } break; case UI_END_FF_UPLOAD: retval = uinput_ff_upload_from_user(p, &ff_up); if (retval) break; req = uinput_request_find(udev, ff_up.request_id); if (!req || req->code != UI_FF_UPLOAD || !req->u.upload.effect) { retval = -EINVAL; break; } req->retval = ff_up.retval; uinput_request_done(udev, req); break; case UI_END_FF_ERASE: if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) { retval = -EFAULT; break; } req = uinput_request_find(udev, ff_erase.request_id); if (!req || req->code != UI_FF_ERASE) { retval = -EINVAL; break; } req->retval = ff_erase.retval; uinput_request_done(udev, req); break; default: retval = -EINVAL; } out: mutex_unlock(&udev->mutex); return retval; } static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return uinput_ioctl_handler(file, cmd, arg, (void __user *)arg); } #ifdef CONFIG_COMPAT static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); } #endif static const struct file_operations uinput_fops = { .owner = THIS_MODULE, .open = uinput_open, .release = uinput_release, .read = uinput_read, .write = uinput_write, .poll = uinput_poll, .unlocked_ioctl = uinput_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = uinput_compat_ioctl, #endif .llseek = no_llseek, }; static struct miscdevice uinput_misc = { .fops = &uinput_fops, .minor = UINPUT_MINOR, .name = UINPUT_NAME, }; MODULE_ALIAS_MISCDEV(UINPUT_MINOR); MODULE_ALIAS("devname:" UINPUT_NAME); static int __init uinput_init(void) { return misc_register(&uinput_misc); } static void __exit uinput_exit(void) { misc_deregister(&uinput_misc); } MODULE_AUTHOR("Aristeu Sergio Rozanski Filho"); MODULE_DESCRIPTION("User level driver support for input subsystem"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.3"); module_init(uinput_init); module_exit(uinput_exit);
gpl-2.0
MotoXoomKernelFlavors/tiamat-kernel-mod
drivers/staging/comedi/drivers/adq12b.c
7985
11747
/* comedi/drivers/adq12b.c driver for MicroAxial ADQ12-B data acquisition and control card COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: adq12b Description: driver for MicroAxial ADQ12-B data acquisition and control card Devices: [MicroAxial] ADQ12-B (adq12b) Author: jeremy theler <thelerg@ib.cnea.gov.ar> Updated: Thu, 21 Feb 2008 02:56:27 -0300 Status: works Driver for the acquisition card ADQ12-B (without any add-on). - Analog input is subdevice 0 (16 channels single-ended or 8 differential) - Digital input is subdevice 1 (5 channels) - Digital output is subdevice 1 (8 channels) - The PACER is not supported in this version If you do not specify any options, they will default to # comedi_config /dev/comedi0 adq12b 0x300,0,0 option 1: I/O base address. The following table is provided as a help of the hardware jumpers. 
address jumper JADR 0x300 1 (factory default) 0x320 2 0x340 3 0x360 4 0x380 5 0x3A0 6 option 2: unipolar/bipolar ADC selection: 0 -> bipolar, 1 -> unipolar selection comedi_config option JUB bipolar 0 2-3 (factory default) unipolar 1 1-2 option 3: single-ended/differential AI selection: 0 -> SE, 1 -> differential selection comedi_config option JCHA JCHB single-ended 0 1-2 1-2 (factory default) differential 1 2-3 2-3 written by jeremy theler <thelerg@ib.cnea.gov.ar> instituto balseiro commission nacional de energia atomica universidad nacional de cuyo argentina 21-feb-2008 + changed supported devices string (missused the [] and ()) 13-oct-2007 + first try */ #include "../comedidev.h" /* address scheme (page 2.17 of the manual) */ #define ADQ12B_SIZE 16 #define ADQ12B_CTREG 0x00 #define ADQ12B_STINR 0x00 #define ADQ12B_OUTBR 0x04 #define ADQ12B_ADLOW 0x08 #define ADQ12B_ADHIG 0x09 #define ADQ12B_CONT0 0x0c #define ADQ12B_CONT1 0x0d #define ADQ12B_CONT2 0x0e #define ADQ12B_COWORD 0x0f /* mask of the bit at STINR to check end of conversion */ #define ADQ12B_EOC 0x20 #define TIMEOUT 20 /* available ranges through the PGA gains */ static const struct comedi_lrange range_adq12b_ai_bipolar = { 4, { BIP_RANGE(5), BIP_RANGE(2), BIP_RANGE(1), BIP_RANGE(0.5) } }; static const struct comedi_lrange range_adq12b_ai_unipolar = { 4, { UNI_RANGE(5), UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE (0.5) } }; struct adq12b_board { const char *name; int ai_se_chans; int ai_diff_chans; int ai_bits; int di_chans; int do_chans; }; static const struct adq12b_board adq12b_boards[] = { { .name = "adq12b", .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .di_chans = 5, .do_chans = 8} /* potentially, more adq-based deviced will be added */ /*, .name = "adq12b", .ai_chans = 16, // this is just for reference, hardcoded again later .ai_bits = 12, .di_chans = 8, .do_chans = 5 }*/ }; #define thisboard ((const struct adq12b_board *)dev->board_ptr) struct adq12b_private { int unipolar; /* option 2 of 
comedi_config (1 is iobase) */ int differential; /* option 3 of comedi_config */ int last_channel; int last_range; unsigned int digital_state; }; #define devpriv ((struct adq12b_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int adq12b_detach(struct comedi_device *dev); static struct comedi_driver driver_adq12b = { .driver_name = "adq12b", .module = THIS_MODULE, .attach = adq12b_attach, .detach = adq12b_detach, .board_name = &adq12b_boards[0].name, .offset = sizeof(struct adq12b_board), .num_names = ARRAY_SIZE(adq12b_boards), }; static int adq12b_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adq12b_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adq12b_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. */ static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase; int unipolar, differential; iobase = it->options[0]; unipolar = it->options[1]; differential = it->options[2]; printk(KERN_INFO "comedi%d: adq12b called with options base=0x%03lx, " "%s and %s\n", dev->minor, iobase, (unipolar == 1) ? "unipolar" : "bipolar", (differential == 1) ? 
"differential" : "single-ended"); /* if no address was specified, try the default 0x300 */ if (iobase == 0) { printk(KERN_WARNING "comedi%d: adq12b warning: I/O base " "address not specified. Trying the default 0x300.\n", dev->minor); iobase = 0x300; } printk("comedi%d: adq12b: 0x%04lx ", dev->minor, iobase); if (!request_region(iobase, ADQ12B_SIZE, "adq12b")) { printk("I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* * Initialize dev->board_name. Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = thisboard->name; /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ if (alloc_private(dev, sizeof(struct adq12b_private)) < 0) return -ENOMEM; /* fill in devpriv structure */ devpriv->unipolar = unipolar; devpriv->differential = differential; devpriv->digital_state = 0; /* initialize channel and range to -1 so we make sure we always write at least once to the CTREG in the instruction */ devpriv->last_channel = -1; devpriv->last_range = -1; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. 
*/ if (alloc_subdevices(dev, 3) < 0) return -ENOMEM; s = dev->subdevices + 0; /* analog input subdevice */ s->type = COMEDI_SUBD_AI; if (differential) { s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF; s->n_chan = thisboard->ai_diff_chans; } else { s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = thisboard->ai_se_chans; } if (unipolar) s->range_table = &range_adq12b_ai_unipolar; else s->range_table = &range_adq12b_ai_bipolar; s->maxdata = (1 << thisboard->ai_bits) - 1; s->len_chanlist = 4; /* This is the maximum chanlist length that the board can handle */ s->insn_read = adq12b_ai_rinsn; s = dev->subdevices + 1; /* digital input subdevice */ s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = thisboard->di_chans; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = adq12b_di_insn_bits; s = dev->subdevices + 2; /* digital output subdevice */ s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = thisboard->do_chans; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = adq12b_do_insn_bits; printk(KERN_INFO "attached\n"); return 0; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int adq12b_detach(struct comedi_device *dev) { if (dev->iobase) release_region(dev->iobase, ADQ12B_SIZE); kfree(devpriv); printk(KERN_INFO "comedi%d: adq12b: removed\n", dev->minor); return 0; } /* * "instructions" read/write data in "one-shot" or "software-triggered" * mode. 
*/ static int adq12b_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i; int range, channel; unsigned char hi, lo, status; /* change channel and range only if it is different from the previous */ range = CR_RANGE(insn->chanspec); channel = CR_CHAN(insn->chanspec); if (channel != devpriv->last_channel || range != devpriv->last_range) { outb((range << 4) | channel, dev->iobase + ADQ12B_CTREG); udelay(50); /* wait for the mux to settle */ } /* trigger conversion */ status = inb(dev->iobase + ADQ12B_ADLOW); /* convert n samples */ for (n = 0; n < insn->n; n++) { /* wait for end of conversion */ i = 0; do { /* udelay(1); */ status = inb(dev->iobase + ADQ12B_STINR); status = status & ADQ12B_EOC; } while (status == 0 && ++i < TIMEOUT); /* } while (++i < 10); */ /* read data */ hi = inb(dev->iobase + ADQ12B_ADHIG); lo = inb(dev->iobase + ADQ12B_ADLOW); /* printk("debug: chan=%d range=%d status=%d hi=%d lo=%d\n", channel, range, status, hi, lo); */ data[n] = (hi << 8) | lo; } /* return the number of samples read/written */ return n; } static int adq12b_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { /* only bits 0-4 have information about digital inputs */ data[1] = (inb(dev->iobase + ADQ12B_STINR) & (0x1f)); return 2; } static int adq12b_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel; for (channel = 0; channel < 8; channel++) if (((data[0] >> channel) & 0x01) != 0) outb((((data[1] >> channel) & 0x01) << 3) | channel, dev->iobase + ADQ12B_OUTBR); /* store information to retrieve when asked for reading */ if (data[0]) { devpriv->digital_state &= ~data[0]; devpriv->digital_state |= (data[0] & data[1]); } data[1] = devpriv->digital_state; return 2; } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. 
*/ static int __init driver_adq12b_init_module(void) { return comedi_driver_register(&driver_adq12b); } static void __exit driver_adq12b_cleanup_module(void) { comedi_driver_unregister(&driver_adq12b); } module_init(driver_adq12b_init_module); module_exit(driver_adq12b_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
szezso/android_kernel_samsung_expressltexx
drivers/sn/ioc3.c
9265
20530
/* * SGI IOC3 master driver and IRQ demuxer * * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org> * Heavily based on similar work by: * Brent Casavant <bcasavan@sgi.com> - IOC4 master driver * Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer */ #include <linux/errno.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/ioc3.h> #include <linux/rwsem.h> #include <linux/slab.h> #define IOC3_PCI_SIZE 0x100000 static LIST_HEAD(ioc3_devices); static int ioc3_counter; static DECLARE_RWSEM(ioc3_devices_rwsem); static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES]; static struct ioc3_submodule *ioc3_ethernet; static DEFINE_RWLOCK(ioc3_submodules_lock); /* NIC probing code */ #define GPCR_MLAN_EN 0x00200000 /* enable MCR to pin 8 */ static inline unsigned mcr_pack(unsigned pulse, unsigned sample) { return (pulse << 10) | (sample << 2); } static int nic_wait(struct ioc3_driver_data *idd) { unsigned mcr; do { mcr = readl(&idd->vma->mcr); } while (!(mcr & 2)); return mcr & 1; } static int nic_reset(struct ioc3_driver_data *idd) { int presence; unsigned long flags; local_irq_save(flags); writel(mcr_pack(500, 65), &idd->vma->mcr); presence = nic_wait(idd); local_irq_restore(flags); udelay(500); return presence; } static int nic_read_bit(struct ioc3_driver_data *idd) { int result; unsigned long flags; local_irq_save(flags); writel(mcr_pack(6, 13), &idd->vma->mcr); result = nic_wait(idd); local_irq_restore(flags); udelay(500); return result; } static void nic_write_bit(struct ioc3_driver_data *idd, int bit) { if (bit) writel(mcr_pack(6, 110), &idd->vma->mcr); else writel(mcr_pack(80, 30), &idd->vma->mcr); nic_wait(idd); } static unsigned nic_read_byte(struct ioc3_driver_data *idd) { unsigned result = 0; int i; for (i = 0; i < 8; i++) result = (result >> 1) | (nic_read_bit(idd) << 7); return result; } static void 
nic_write_byte(struct ioc3_driver_data *idd, int byte) { int i, bit; for (i = 8; i; i--) { bit = byte & 1; byte >>= 1; nic_write_bit(idd, bit); } } static unsigned long nic_find(struct ioc3_driver_data *idd, int *last, unsigned long addr) { int a, b, index, disc; nic_reset(idd); /* Search ROM. */ nic_write_byte(idd, 0xF0); /* Algorithm from ``Book of iButton Standards''. */ for (index = 0, disc = 0; index < 64; index++) { a = nic_read_bit(idd); b = nic_read_bit(idd); if (a && b) { printk(KERN_WARNING "IOC3 NIC search failed.\n"); *last = 0; return 0; } if (!a && !b) { if (index == *last) { addr |= 1UL << index; } else if (index > *last) { addr &= ~(1UL << index); disc = index; } else if ((addr & (1UL << index)) == 0) disc = index; nic_write_bit(idd, (addr>>index)&1); continue; } else { if (a) addr |= 1UL << index; else addr &= ~(1UL << index); nic_write_bit(idd, a); continue; } } *last = disc; return addr; } static void nic_addr(struct ioc3_driver_data *idd, unsigned long addr) { int index; nic_reset(idd); nic_write_byte(idd, 0xF0); for (index = 0; index < 64; index++) { nic_read_bit(idd); nic_read_bit(idd); nic_write_bit(idd, (addr>>index)&1); } } static void crc16_byte(unsigned int *crc, unsigned char db) { int i; for(i=0;i<8;i++) { *crc <<= 1; if((db^(*crc>>16)) & 1) *crc ^= 0x8005; db >>= 1; } *crc &= 0xFFFF; } static unsigned int crc16_area(unsigned char *dbs, int size, unsigned int crc) { while(size--) crc16_byte(&crc, *(dbs++)); return crc; } static void crc8_byte(unsigned int *crc, unsigned char db) { int i,f; for(i=0;i<8;i++) { f = (*crc ^ db) & 1; *crc >>= 1; db >>= 1; if(f) *crc ^= 0x8c; } *crc &= 0xff; } static unsigned int crc8_addr(unsigned long addr) { int i; unsigned int crc = 0x00; for(i=0;i<8;i++) crc8_byte(&crc, addr>>(i<<3)); return crc; } static void read_redir_page(struct ioc3_driver_data *idd, unsigned long addr, int page, unsigned char *redir, unsigned char *data) { int loops = 16, i; while(redir[page] != 0xFF) { page = redir[page]^0xFF; 
loops--; if(loops<0) { printk(KERN_ERR "IOC3: NIC circular redirection\n"); return; } } loops = 3; while(loops>0) { nic_addr(idd, addr); nic_write_byte(idd, 0xF0); nic_write_byte(idd, (page << 5) & 0xE0); nic_write_byte(idd, (page >> 3) & 0x1F); for(i=0;i<0x20;i++) data[i] = nic_read_byte(idd); if(crc16_area(data, 0x20, 0x0000) == 0x800d) return; loops--; } printk(KERN_ERR "IOC3: CRC error in data page\n"); for(i=0;i<0x20;i++) data[i] = 0x00; } static void read_redir_map(struct ioc3_driver_data *idd, unsigned long addr, unsigned char *redir) { int i,j,loops = 3,crc_ok; unsigned int crc; while(loops>0) { crc_ok = 1; nic_addr(idd, addr); nic_write_byte(idd, 0xAA); nic_write_byte(idd, 0x00); nic_write_byte(idd, 0x01); for(i=0;i<64;i+=8) { for(j=0;j<8;j++) redir[i+j] = nic_read_byte(idd); crc = crc16_area(redir+i, 8, (i==0)?0x8707:0x0000); crc16_byte(&crc, nic_read_byte(idd)); crc16_byte(&crc, nic_read_byte(idd)); if(crc != 0x800d) crc_ok = 0; } if(crc_ok) return; loops--; } printk(KERN_ERR "IOC3: CRC error in redirection page\n"); for(i=0;i<64;i++) redir[i] = 0xFF; } static void read_nic(struct ioc3_driver_data *idd, unsigned long addr) { unsigned char redir[64]; unsigned char data[64],part[32]; int i,j; /* read redirections */ read_redir_map(idd, addr, redir); /* read data pages */ read_redir_page(idd, addr, 0, redir, data); read_redir_page(idd, addr, 1, redir, data+32); /* assemble the part # */ j=0; for(i=0;i<19;i++) if(data[i+11] != ' ') part[j++] = data[i+11]; for(i=0;i<6;i++) if(data[i+32] != ' ') part[j++] = data[i+32]; part[j] = 0; /* skip Octane power supplies */ if(!strncmp(part, "060-0035-", 9)) return; if(!strncmp(part, "060-0038-", 9)) return; strcpy(idd->nic_part, part); /* assemble the serial # */ j=0; for(i=0;i<10;i++) if(data[i+1] != ' ') idd->nic_serial[j++] = data[i+1]; idd->nic_serial[j] = 0; } static void read_mac(struct ioc3_driver_data *idd, unsigned long addr) { int i, loops = 3; unsigned char data[13]; while(loops>0) { nic_addr(idd, addr); 
nic_write_byte(idd, 0xF0); nic_write_byte(idd, 0x00); nic_write_byte(idd, 0x00); nic_read_byte(idd); for(i=0;i<13;i++) data[i] = nic_read_byte(idd); if(crc16_area(data, 13, 0x0000) == 0x800d) { for(i=10;i>4;i--) idd->nic_mac[10-i] = data[i]; return; } loops--; } printk(KERN_ERR "IOC3: CRC error in MAC address\n"); for(i=0;i<6;i++) idd->nic_mac[i] = 0x00; } static void probe_nic(struct ioc3_driver_data *idd) { int save = 0, loops = 3; unsigned long first, addr; writel(GPCR_MLAN_EN, &idd->vma->gpcr_s); while(loops>0) { idd->nic_part[0] = 0; idd->nic_serial[0] = 0; addr = first = nic_find(idd, &save, 0); if(!first) return; while(1) { if(crc8_addr(addr)) break; else { switch(addr & 0xFF) { case 0x0B: read_nic(idd, addr); break; case 0x09: case 0x89: case 0x91: read_mac(idd, addr); break; } } addr = nic_find(idd, &save, addr); if(addr == first) return; } loops--; } printk(KERN_ERR "IOC3: CRC error in NIC address\n"); } /* Interrupts */ static void write_ireg(struct ioc3_driver_data *idd, uint32_t val, int which) { unsigned long flags; spin_lock_irqsave(&idd->ir_lock, flags); switch (which) { case IOC3_W_IES: writel(val, &idd->vma->sio_ies); break; case IOC3_W_IEC: writel(val, &idd->vma->sio_iec); break; } spin_unlock_irqrestore(&idd->ir_lock, flags); } static inline uint32_t get_pending_intrs(struct ioc3_driver_data *idd) { unsigned long flag; uint32_t intrs = 0; spin_lock_irqsave(&idd->ir_lock, flag); intrs = readl(&idd->vma->sio_ir); intrs &= readl(&idd->vma->sio_ies); spin_unlock_irqrestore(&idd->ir_lock, flag); return intrs; } static irqreturn_t ioc3_intr_io(int irq, void *arg) { unsigned long flags; struct ioc3_driver_data *idd = arg; int handled = 1, id; unsigned int pending; read_lock_irqsave(&ioc3_submodules_lock, flags); if(idd->dual_irq && readb(&idd->vma->eisr)) { /* send Ethernet IRQ to the driver */ if(ioc3_ethernet && idd->active[ioc3_ethernet->id] && ioc3_ethernet->intr) { handled = handled && !ioc3_ethernet->intr(ioc3_ethernet, idd, 0); } } pending = 
get_pending_intrs(idd); /* look at the IO IRQs */ for(id=0;id<IOC3_MAX_SUBMODULES;id++) { if(idd->active[id] && ioc3_submodules[id] && (pending & ioc3_submodules[id]->irq_mask) && ioc3_submodules[id]->intr) { write_ireg(idd, ioc3_submodules[id]->irq_mask, IOC3_W_IEC); if(!ioc3_submodules[id]->intr(ioc3_submodules[id], idd, pending & ioc3_submodules[id]->irq_mask)) pending &= ~ioc3_submodules[id]->irq_mask; if (ioc3_submodules[id]->reset_mask) write_ireg(idd, ioc3_submodules[id]->irq_mask, IOC3_W_IES); } } read_unlock_irqrestore(&ioc3_submodules_lock, flags); if(pending) { printk(KERN_WARNING "IOC3: Pending IRQs 0x%08x discarded and disabled\n",pending); write_ireg(idd, pending, IOC3_W_IEC); handled = 1; } return handled?IRQ_HANDLED:IRQ_NONE; } static irqreturn_t ioc3_intr_eth(int irq, void *arg) { unsigned long flags; struct ioc3_driver_data *idd = (struct ioc3_driver_data *)arg; int handled = 1; if(!idd->dual_irq) return IRQ_NONE; read_lock_irqsave(&ioc3_submodules_lock, flags); if(ioc3_ethernet && idd->active[ioc3_ethernet->id] && ioc3_ethernet->intr) handled = handled && !ioc3_ethernet->intr(ioc3_ethernet, idd, 0); read_unlock_irqrestore(&ioc3_submodules_lock, flags); return handled?IRQ_HANDLED:IRQ_NONE; } void ioc3_enable(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int irqs) { write_ireg(idd, irqs & is->irq_mask, IOC3_W_IES); } void ioc3_ack(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int irqs) { writel(irqs & is->irq_mask, &idd->vma->sio_ir); } void ioc3_disable(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int irqs) { write_ireg(idd, irqs & is->irq_mask, IOC3_W_IEC); } void ioc3_gpcr_set(struct ioc3_driver_data *idd, unsigned int val) { unsigned long flags; spin_lock_irqsave(&idd->gpio_lock, flags); writel(val, &idd->vma->gpcr_s); spin_unlock_irqrestore(&idd->gpio_lock, flags); } /* Keep it simple, stupid! 
*/ static int find_slot(void **tab, int max) { int i; for(i=0;i<max;i++) if(!(tab[i])) return i; return -1; } /* Register an IOC3 submodule */ int ioc3_register_submodule(struct ioc3_submodule *is) { struct ioc3_driver_data *idd; int alloc_id; unsigned long flags; write_lock_irqsave(&ioc3_submodules_lock, flags); alloc_id = find_slot((void **)ioc3_submodules, IOC3_MAX_SUBMODULES); if(alloc_id != -1) { ioc3_submodules[alloc_id] = is; if(is->ethernet) { if(ioc3_ethernet==NULL) ioc3_ethernet=is; else printk(KERN_WARNING "IOC3 Ethernet module already registered!\n"); } } write_unlock_irqrestore(&ioc3_submodules_lock, flags); if(alloc_id == -1) { printk(KERN_WARNING "Increase IOC3_MAX_SUBMODULES!\n"); return -ENOMEM; } is->id=alloc_id; /* Initialize submodule for each IOC3 */ if (!is->probe) return 0; down_read(&ioc3_devices_rwsem); list_for_each_entry(idd, &ioc3_devices, list) { /* set to 1 for IRQs in probe */ idd->active[alloc_id] = 1; idd->active[alloc_id] = !is->probe(is, idd); } up_read(&ioc3_devices_rwsem); return 0; } /* Unregister an IOC3 submodule */ void ioc3_unregister_submodule(struct ioc3_submodule *is) { struct ioc3_driver_data *idd; unsigned long flags; write_lock_irqsave(&ioc3_submodules_lock, flags); if(ioc3_submodules[is->id]==is) ioc3_submodules[is->id]=NULL; else printk(KERN_WARNING "IOC3 submodule %s has wrong ID.\n",is->name); if(ioc3_ethernet==is) ioc3_ethernet = NULL; write_unlock_irqrestore(&ioc3_submodules_lock, flags); /* Remove submodule for each IOC3 */ down_read(&ioc3_devices_rwsem); list_for_each_entry(idd, &ioc3_devices, list) if(idd->active[is->id]) { if(is->remove) if(is->remove(is, idd)) printk(KERN_WARNING "%s: IOC3 submodule %s remove failed " "for pci_dev %s.\n", __func__, module_name(is->owner), pci_name(idd->pdev)); idd->active[is->id] = 0; if(is->irq_mask) write_ireg(idd, is->irq_mask, IOC3_W_IEC); } up_read(&ioc3_devices_rwsem); } /********************* * Device management * *********************/ static char * __devinitdata 
ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3", "MENET 4", "CADduo", "Altix Serial"}; static int __devinit ioc3_class(struct ioc3_driver_data *idd) { int res = IOC3_CLASS_NONE; /* NIC-based logic */ if(!strncmp(idd->nic_part, "030-0891-", 9)) res = IOC3_CLASS_BASE_IP30; if(!strncmp(idd->nic_part, "030-1155-", 9)) res = IOC3_CLASS_CADDUO; if(!strncmp(idd->nic_part, "030-1657-", 9)) res = IOC3_CLASS_SERIAL; if(!strncmp(idd->nic_part, "030-1664-", 9)) res = IOC3_CLASS_SERIAL; /* total random heuristics */ #ifdef CONFIG_SGI_IP27 if(!idd->nic_part[0]) res = IOC3_CLASS_BASE_IP27; #endif /* print educational message */ printk(KERN_INFO "IOC3 part: [%s], serial: [%s] => class %s\n", idd->nic_part, idd->nic_serial, ioc3_class_names[res]); return res; } /* Adds a new instance of an IOC3 card */ static int __devinit ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct ioc3_driver_data *idd; uint32_t pcmd; int ret, id; /* Enable IOC3 and take ownership of it */ if ((ret = pci_enable_device(pdev))) { printk(KERN_WARNING "%s: Failed to enable IOC3 device for pci_dev %s.\n", __func__, pci_name(pdev)); goto out; } pci_set_master(pdev); #ifdef USE_64BIT_DMA ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!ret) { ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (ret < 0) { printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA " "for consistent allocations\n", __func__); } } #endif /* Set up per-IOC3 data */ idd = kzalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL); if (!idd) { printk(KERN_WARNING "%s: Failed to allocate IOC3 data for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_idd; } spin_lock_init(&idd->ir_lock); spin_lock_init(&idd->gpio_lock); idd->pdev = pdev; /* Map all IOC3 registers. These are shared between subdevices * so the main IOC3 module manages them. 
*/ idd->pma = pci_resource_start(pdev, 0); if (!idd->pma) { printk(KERN_WARNING "%s: Unable to find IOC3 resource " "for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_pci; } if (!request_mem_region(idd->pma, IOC3_PCI_SIZE, "ioc3")) { printk(KERN_WARNING "%s: Unable to request IOC3 region " "for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_pci; } idd->vma = ioremap(idd->pma, IOC3_PCI_SIZE); if (!idd->vma) { printk(KERN_WARNING "%s: Unable to remap IOC3 region " "for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_misc_region; } /* Track PCI-device specific data */ pci_set_drvdata(pdev, idd); down_write(&ioc3_devices_rwsem); list_add_tail(&idd->list, &ioc3_devices); idd->id = ioc3_counter++; up_write(&ioc3_devices_rwsem); idd->gpdr_shadow = readl(&idd->vma->gpdr); /* Read IOC3 NIC contents */ probe_nic(idd); /* Detect IOC3 class */ idd->class = ioc3_class(idd); /* Initialize IOC3 */ pci_read_config_dword(pdev, PCI_COMMAND, &pcmd); pci_write_config_dword(pdev, PCI_COMMAND, pcmd | PCI_COMMAND_MEMORY | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_SCR_DROP_MODE_EN); write_ireg(idd, ~0, IOC3_W_IEC); writel(~0, &idd->vma->sio_ir); /* Set up IRQs */ if(idd->class == IOC3_CLASS_BASE_IP30 || idd->class == IOC3_CLASS_BASE_IP27) { writel(0, &idd->vma->eier); writel(~0, &idd->vma->eisr); idd->dual_irq = 1; if (!request_irq(pdev->irq, ioc3_intr_eth, IRQF_SHARED, "ioc3-eth", (void *)idd)) { idd->irq_eth = pdev->irq; } else { printk(KERN_WARNING "%s : request_irq fails for IRQ 0x%x\n ", __func__, pdev->irq); } if (!request_irq(pdev->irq+2, ioc3_intr_io, IRQF_SHARED, "ioc3-io", (void *)idd)) { idd->irq_io = pdev->irq+2; } else { printk(KERN_WARNING "%s : request_irq fails for IRQ 0x%x\n ", __func__, pdev->irq+2); } } else { if (!request_irq(pdev->irq, ioc3_intr_io, IRQF_SHARED, "ioc3", (void *)idd)) { idd->irq_io = pdev->irq; } else { printk(KERN_WARNING "%s : request_irq fails for IRQ 0x%x\n ", __func__, pdev->irq); } } 
/* Add this IOC3 to all submodules */ for(id=0;id<IOC3_MAX_SUBMODULES;id++) if(ioc3_submodules[id] && ioc3_submodules[id]->probe) { idd->active[id] = 1; idd->active[id] = !ioc3_submodules[id]->probe (ioc3_submodules[id], idd); } printk(KERN_INFO "IOC3 Master Driver loaded for %s\n", pci_name(pdev)); return 0; out_misc_region: release_mem_region(idd->pma, IOC3_PCI_SIZE); out_pci: kfree(idd); out_idd: pci_disable_device(pdev); out: return ret; } /* Removes a particular instance of an IOC3 card. */ static void __devexit ioc3_remove(struct pci_dev *pdev) { int id; struct ioc3_driver_data *idd; idd = pci_get_drvdata(pdev); /* Remove this IOC3 from all submodules */ for(id=0;id<IOC3_MAX_SUBMODULES;id++) if(idd->active[id]) { if(ioc3_submodules[id] && ioc3_submodules[id]->remove) if(ioc3_submodules[id]->remove(ioc3_submodules[id], idd)) printk(KERN_WARNING "%s: IOC3 submodule 0x%s remove failed " "for pci_dev %s.\n", __func__, module_name(ioc3_submodules[id]->owner), pci_name(pdev)); idd->active[id] = 0; } /* Clear and disable all IRQs */ write_ireg(idd, ~0, IOC3_W_IEC); writel(~0, &idd->vma->sio_ir); /* Release resources */ free_irq(idd->irq_io, (void *)idd); if(idd->dual_irq) free_irq(idd->irq_eth, (void *)idd); iounmap(idd->vma); release_mem_region(idd->pma, IOC3_PCI_SIZE); /* Disable IOC3 and relinquish */ pci_disable_device(pdev); /* Remove and free driver data */ down_write(&ioc3_devices_rwsem); list_del(&idd->list); up_write(&ioc3_devices_rwsem); kfree(idd); } static struct pci_device_id ioc3_id_table[] = { {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID}, {0} }; static struct pci_driver ioc3_driver = { .name = "IOC3", .id_table = ioc3_id_table, .probe = ioc3_probe, .remove = __devexit_p(ioc3_remove), }; MODULE_DEVICE_TABLE(pci, ioc3_id_table); /********************* * Module management * *********************/ /* Module load */ static int __init ioc3_init(void) { if (ia64_platform_is("sn2")) return pci_register_driver(&ioc3_driver); return 
-ENODEV; } /* Module unload */ static void __exit ioc3_exit(void) { pci_unregister_driver(&ioc3_driver); } module_init(ioc3_init); module_exit(ioc3_exit); MODULE_AUTHOR("Stanislaw Skowronek <skylark@linux-mips.org>"); MODULE_DESCRIPTION("PCI driver for SGI IOC3"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL_GPL(ioc3_register_submodule); EXPORT_SYMBOL_GPL(ioc3_unregister_submodule); EXPORT_SYMBOL_GPL(ioc3_ack); EXPORT_SYMBOL_GPL(ioc3_gpcr_set); EXPORT_SYMBOL_GPL(ioc3_disable); EXPORT_SYMBOL_GPL(ioc3_enable);
gpl-2.0
Vangreen/android_kernel_lge_msm8926
drivers/uwb/i1480/dfu/dfu.c
10033
6051
/* * Intel Wireless UWB Link 1480 * Main driver * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Common code for firmware upload used by the USB and PCI version; * i1480_fw_upload() takes a device descriptor and uses the function * pointers it provides to upload firmware and prepare the PHY. * * As well, provides common functions used by the rest of the code. */ #include "i1480-dfu.h" #include <linux/errno.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/uwb.h> #include <linux/random.h> #include <linux/export.h> /* * i1480_rceb_check - Check RCEB for expected field values * @i1480: pointer to device for which RCEB is being checked * @rceb: RCEB being checked * @cmd: which command the RCEB is related to * @context: expected context * @expected_type: expected event type * @expected_event: expected event * * If @cmd is NULL, do not print error messages, but still return an error * code. * * Return 0 if @rceb matches the expected values, -EINVAL otherwise. 
*/ int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, const char *cmd, u8 context, u8 expected_type, unsigned expected_event) { int result = 0; struct device *dev = i1480->dev; if (rceb->bEventContext != context) { if (cmd) dev_err(dev, "%s: unexpected context id 0x%02x " "(expected 0x%02x)\n", cmd, rceb->bEventContext, context); result = -EINVAL; } if (rceb->bEventType != expected_type) { if (cmd) dev_err(dev, "%s: unexpected event type 0x%02x " "(expected 0x%02x)\n", cmd, rceb->bEventType, expected_type); result = -EINVAL; } if (le16_to_cpu(rceb->wEvent) != expected_event) { if (cmd) dev_err(dev, "%s: unexpected event 0x%04x " "(expected 0x%04x)\n", cmd, le16_to_cpu(rceb->wEvent), expected_event); result = -EINVAL; } return result; } EXPORT_SYMBOL_GPL(i1480_rceb_check); /* * Execute a Radio Control Command * * Command data has to be in i1480->cmd_buf. * * @returns size of the reply data filled in i1480->evt_buf or < 0 errno * code on error. */ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, size_t reply_size) { ssize_t result; struct uwb_rceb *reply = i1480->evt_buf; struct uwb_rccb *cmd = i1480->cmd_buf; u16 expected_event = reply->wEvent; u8 expected_type = reply->bEventType; u8 context; init_completion(&i1480->evt_complete); i1480->evt_result = -EINPROGRESS; do { get_random_bytes(&context, 1); } while (context == 0x00 || context == 0xff); cmd->bCommandContext = context; result = i1480->cmd(i1480, cmd_name, cmd_size); if (result < 0) goto error; /* wait for the callback to report a event was received */ result = wait_for_completion_interruptible_timeout( &i1480->evt_complete, HZ); if (result == 0) { result = -ETIMEDOUT; goto error; } if (result < 0) goto error; result = i1480->evt_result; if (result < 0) { dev_err(i1480->dev, "%s: command reply reception failed: %zd\n", cmd_name, result); goto error; } /* * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a * spurious notification after firmware 
is downloaded. So check whether * the receibed RCEB is such notification before assuming that the * command has failed. */ if (i1480_rceb_check(i1480, i1480->evt_buf, NULL, 0, 0xfd, 0x0022) == 0) { /* Now wait for the actual RCEB for this command. */ result = i1480->wait_init_done(i1480); if (result < 0) goto error; result = i1480->evt_result; } if (result != reply_size) { dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n", cmd_name, result, reply_size); result = -EINVAL; goto error; } /* Verify we got the right event in response */ result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, expected_type, expected_event); error: return result; } EXPORT_SYMBOL_GPL(i1480_cmd); static int i1480_print_state(struct i1480 *i1480) { int result; u32 *buf = (u32 *) i1480->cmd_buf; result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf)); if (result < 0) { dev_err(i1480->dev, "cannot read U & L states: %d\n", result); goto error; } dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]); error: return result; } /* * PCI probe, firmware uploader * * _mac_fw_upload() will call rc_setup(), which needs an rc_release(). */ int i1480_fw_upload(struct i1480 *i1480) { int result; result = i1480_pre_fw_upload(i1480); /* PHY pre fw */ if (result < 0 && result != -ENOENT) { i1480_print_state(i1480); goto error; } result = i1480_mac_fw_upload(i1480); /* MAC fw */ if (result < 0) { if (result == -ENOENT) dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n", i1480->mac_fw_name); else i1480_print_state(i1480); goto error; } result = i1480_phy_fw_upload(i1480); /* PHY fw */ if (result < 0 && result != -ENOENT) { i1480_print_state(i1480); goto error_rc_release; } /* * FIXME: find some reliable way to check whether firmware is running * properly. Maybe use some standard request that has no side effects? 
*/ dev_info(i1480->dev, "firmware uploaded successfully\n"); error_rc_release: if (i1480->rc_release) i1480->rc_release(i1480); result = 0; error: return result; } EXPORT_SYMBOL_GPL(i1480_fw_upload);
gpl-2.0
ddk50/cbc-linux-kernel
fs/ocfs2/dlmfs/dlmfsver.c
12593
1211
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmfsver.c * * version string * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include "dlmfsver.h" #define DLM_BUILD_VERSION "1.5.0" #define VERSION_STR "OCFS2 DLMFS " DLM_BUILD_VERSION void dlmfs_print_version(void) { printk(KERN_INFO "%s\n", VERSION_STR); } MODULE_DESCRIPTION(VERSION_STR); MODULE_VERSION(DLM_BUILD_VERSION);
gpl-2.0
goodhanrry/N910U_goodhanrry_kernel
fs/ocfs2/dlmfs/dlmfsver.c
12593
1211
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmfsver.c * * version string * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include "dlmfsver.h" #define DLM_BUILD_VERSION "1.5.0" #define VERSION_STR "OCFS2 DLMFS " DLM_BUILD_VERSION void dlmfs_print_version(void) { printk(KERN_INFO "%s\n", VERSION_STR); } MODULE_DESCRIPTION(VERSION_STR); MODULE_VERSION(DLM_BUILD_VERSION);
gpl-2.0
sinutech/sinuos-kernel
drivers/hwmon/ntc_thermistor.c
50
11866
/* * ntc_thermistor.c - NTC Thermistors * * Copyright (C) 2010 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/math64.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/platform_data/ntc_thermistor.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> struct ntc_compensation { int temp_C; unsigned int ohm; }; /* * A compensation table should be sorted by the values of .ohm * in descending order. 
* The following compensation tables are from the specification of Murata NTC * Thermistors Datasheet */ const struct ntc_compensation ncpXXwb473[] = { { .temp_C = -40, .ohm = 1747920 }, { .temp_C = -35, .ohm = 1245428 }, { .temp_C = -30, .ohm = 898485 }, { .temp_C = -25, .ohm = 655802 }, { .temp_C = -20, .ohm = 483954 }, { .temp_C = -15, .ohm = 360850 }, { .temp_C = -10, .ohm = 271697 }, { .temp_C = -5, .ohm = 206463 }, { .temp_C = 0, .ohm = 158214 }, { .temp_C = 5, .ohm = 122259 }, { .temp_C = 10, .ohm = 95227 }, { .temp_C = 15, .ohm = 74730 }, { .temp_C = 20, .ohm = 59065 }, { .temp_C = 25, .ohm = 47000 }, { .temp_C = 30, .ohm = 37643 }, { .temp_C = 35, .ohm = 30334 }, { .temp_C = 40, .ohm = 24591 }, { .temp_C = 45, .ohm = 20048 }, { .temp_C = 50, .ohm = 16433 }, { .temp_C = 55, .ohm = 13539 }, { .temp_C = 60, .ohm = 11209 }, { .temp_C = 65, .ohm = 9328 }, { .temp_C = 70, .ohm = 7798 }, { .temp_C = 75, .ohm = 6544 }, { .temp_C = 80, .ohm = 5518 }, { .temp_C = 85, .ohm = 4674 }, { .temp_C = 90, .ohm = 3972 }, { .temp_C = 95, .ohm = 3388 }, { .temp_C = 100, .ohm = 2902 }, { .temp_C = 105, .ohm = 2494 }, { .temp_C = 110, .ohm = 2150 }, { .temp_C = 115, .ohm = 1860 }, { .temp_C = 120, .ohm = 1615 }, { .temp_C = 125, .ohm = 1406 }, }; const struct ntc_compensation ncpXXwl333[] = { { .temp_C = -40, .ohm = 1610154 }, { .temp_C = -35, .ohm = 1130850 }, { .temp_C = -30, .ohm = 802609 }, { .temp_C = -25, .ohm = 575385 }, { .temp_C = -20, .ohm = 416464 }, { .temp_C = -15, .ohm = 304219 }, { .temp_C = -10, .ohm = 224193 }, { .temp_C = -5, .ohm = 166623 }, { .temp_C = 0, .ohm = 124850 }, { .temp_C = 5, .ohm = 94287 }, { .temp_C = 10, .ohm = 71747 }, { .temp_C = 15, .ohm = 54996 }, { .temp_C = 20, .ohm = 42455 }, { .temp_C = 25, .ohm = 33000 }, { .temp_C = 30, .ohm = 25822 }, { .temp_C = 35, .ohm = 20335 }, { .temp_C = 40, .ohm = 16115 }, { .temp_C = 45, .ohm = 12849 }, { .temp_C = 50, .ohm = 10306 }, { .temp_C = 55, .ohm = 8314 }, { .temp_C = 60, .ohm = 6746 }, { .temp_C = 
65, .ohm = 5503 }, { .temp_C = 70, .ohm = 4513 }, { .temp_C = 75, .ohm = 3721 }, { .temp_C = 80, .ohm = 3084 }, { .temp_C = 85, .ohm = 2569 }, { .temp_C = 90, .ohm = 2151 }, { .temp_C = 95, .ohm = 1809 }, { .temp_C = 100, .ohm = 1529 }, { .temp_C = 105, .ohm = 1299 }, { .temp_C = 110, .ohm = 1108 }, { .temp_C = 115, .ohm = 949 }, { .temp_C = 120, .ohm = 817 }, { .temp_C = 125, .ohm = 707 }, }; struct ntc_data { struct device *hwmon_dev; struct ntc_thermistor_platform_data *pdata; const struct ntc_compensation *comp; struct device *dev; int n_comp; char name[PLATFORM_NAME_SIZE]; }; static inline u64 div64_u64_safe(u64 dividend, u64 divisor) { if (divisor == 0 && dividend == 0) return 0; if (divisor == 0) return UINT_MAX; return div64_u64(dividend, divisor); } static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uV) { struct ntc_thermistor_platform_data *pdata = data->pdata; u64 mV = uV / 1000; u64 pmV = pdata->pullup_uV / 1000; u64 N, puO, pdO; puO = pdata->pullup_ohm; pdO = pdata->pulldown_ohm; if (mV == 0) { if (pdata->connect == NTC_CONNECTED_POSITIVE) return INT_MAX; return 0; } if (mV >= pmV) return (pdata->connect == NTC_CONNECTED_POSITIVE) ? 0 : INT_MAX; if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0) N = div64_u64_safe(pdO * (pmV - mV), mV); else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0) N = div64_u64_safe(puO * mV, pmV - mV); else if (pdata->connect == NTC_CONNECTED_POSITIVE) N = div64_u64_safe(pdO * puO * (pmV - mV), puO * mV - pdO * (pmV - mV)); else N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV); if (N > INT_MAX) N = INT_MAX; return N; } static void lookup_comp(struct ntc_data *data, unsigned int ohm, int *i_low, int *i_high) { int start, end, mid; /* * Handle special cases: Resistance is higher than or equal to * resistance in first table entry, or resistance is lower or equal * to resistance in last table entry. 
* In these cases, return i_low == i_high, either pointing to the * beginning or to the end of the table depending on the condition. */ if (ohm >= data->comp[0].ohm) { *i_low = 0; *i_high = 0; return; } if (ohm <= data->comp[data->n_comp - 1].ohm) { *i_low = data->n_comp - 1; *i_high = data->n_comp - 1; return; } /* Do a binary search on compensation table */ start = 0; end = data->n_comp; while (start < end) { mid = start + (end - start) / 2; /* * start <= mid < end * data->comp[start].ohm > ohm >= data->comp[end].ohm * * We could check for "ohm == data->comp[mid].ohm" here, but * that is a quite unlikely condition, and we would have to * check again after updating start. Check it at the end instead * for simplicity. */ if (ohm >= data->comp[mid].ohm) { end = mid; } else { start = mid + 1; /* * ohm >= data->comp[start].ohm might be true here, * since we set start to mid + 1. In that case, we are * done. We could keep going, but the condition is quite * likely to occur, so it is worth checking for it. 
*/ if (ohm >= data->comp[start].ohm) end = start; } /* * start <= end * data->comp[start].ohm >= ohm >= data->comp[end].ohm */ } /* * start == end * ohm >= data->comp[end].ohm */ *i_low = end; if (ohm == data->comp[end].ohm) *i_high = end; else *i_high = end - 1; } static int get_temp_mC(struct ntc_data *data, unsigned int ohm) { int low, high; int temp; lookup_comp(data, ohm, &low, &high); if (low == high) { /* Unable to use linear approximation */ temp = data->comp[low].temp_C * 1000; } else { temp = data->comp[low].temp_C * 1000 + ((data->comp[high].temp_C - data->comp[low].temp_C) * 1000 * ((int)ohm - (int)data->comp[low].ohm)) / ((int)data->comp[high].ohm - (int)data->comp[low].ohm); } return temp; } static int ntc_thermistor_get_ohm(struct ntc_data *data) { int read_uV; if (data->pdata->read_ohm) return data->pdata->read_ohm(); if (data->pdata->read_uV) { read_uV = data->pdata->read_uV(); if (read_uV < 0) return read_uV; return get_ohm_of_thermistor(data, read_uV); } return -EINVAL; } static ssize_t ntc_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct ntc_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static ssize_t ntc_show_type(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "4\n"); } static ssize_t ntc_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct ntc_data *data = dev_get_drvdata(dev); int ohm; ohm = ntc_thermistor_get_ohm(data); if (ohm < 0) return ohm; return sprintf(buf, "%d\n", get_temp_mC(data, ohm)); } static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0); static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL); static struct attribute *ntc_attributes[] = { &dev_attr_name.attr, &sensor_dev_attr_temp1_type.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL, }; static const struct attribute_group ntc_attr_group = { 
.attrs = ntc_attributes, }; static int __devinit ntc_thermistor_probe(struct platform_device *pdev) { struct ntc_data *data; struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data; int ret = 0; if (!pdata) { dev_err(&pdev->dev, "No platform init data supplied.\n"); return -ENODEV; } /* Either one of the two is required. */ if (!pdata->read_uV && !pdata->read_ohm) { dev_err(&pdev->dev, "Both read_uV and read_ohm missing. Need either one of the two.\n"); return -EINVAL; } if (pdata->read_uV && pdata->read_ohm) { dev_warn(&pdev->dev, "Only one of read_uV and read_ohm is needed; ignoring read_uV.\n"); pdata->read_uV = NULL; } if (pdata->read_uV && (pdata->pullup_uV == 0 || (pdata->pullup_ohm == 0 && pdata->connect == NTC_CONNECTED_GROUND) || (pdata->pulldown_ohm == 0 && pdata->connect == NTC_CONNECTED_POSITIVE) || (pdata->connect != NTC_CONNECTED_POSITIVE && pdata->connect != NTC_CONNECTED_GROUND))) { dev_err(&pdev->dev, "Required data to use read_uV not supplied.\n"); return -EINVAL; } data = devm_kzalloc(&pdev->dev, sizeof(struct ntc_data), GFP_KERNEL); if (!data) return -ENOMEM; data->dev = &pdev->dev; data->pdata = pdata; strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE); switch (pdev->id_entry->driver_data) { case TYPE_NCPXXWB473: data->comp = ncpXXwb473; data->n_comp = ARRAY_SIZE(ncpXXwb473); break; case TYPE_NCPXXWL333: data->comp = ncpXXwl333; data->n_comp = ARRAY_SIZE(ncpXXwl333); break; default: dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n", pdev->id_entry->driver_data, pdev->id_entry->name); return -EINVAL; } platform_set_drvdata(pdev, data); ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group); if (ret) { dev_err(data->dev, "unable to create sysfs files\n"); return ret; } data->hwmon_dev = hwmon_device_register(data->dev); if (IS_ERR(data->hwmon_dev)) { dev_err(data->dev, "unable to register as hwmon device.\n"); ret = PTR_ERR(data->hwmon_dev); goto err_after_sysfs; } dev_info(&pdev->dev, "Thermistor %s:%d (type: 
%s/%lu) successfully probed.\n", pdev->name, pdev->id, pdev->id_entry->name, pdev->id_entry->driver_data); return 0; err_after_sysfs: sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); return ret; } static int __devexit ntc_thermistor_remove(struct platform_device *pdev) { struct ntc_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); platform_set_drvdata(pdev, NULL); return 0; } static const struct platform_device_id ntc_thermistor_id[] = { { "ncp15wb473", TYPE_NCPXXWB473 }, { "ncp18wb473", TYPE_NCPXXWB473 }, { "ncp21wb473", TYPE_NCPXXWB473 }, { "ncp03wb473", TYPE_NCPXXWB473 }, { "ncp15wl333", TYPE_NCPXXWL333 }, { }, }; static struct platform_driver ntc_thermistor_driver = { .driver = { .name = "ntc-thermistor", .owner = THIS_MODULE, }, .probe = ntc_thermistor_probe, .remove = __devexit_p(ntc_thermistor_remove), .id_table = ntc_thermistor_id, }; module_platform_driver(ntc_thermistor_driver); MODULE_DESCRIPTION("NTC Thermistor Driver"); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ntc-thermistor");
gpl-2.0
MassStash/htc_pme_kernel_sense_6.0
sound/soc/codecs/wcd9xxx-resmgr.c
50
32260
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <uapi/linux/mfd/wcd9xxx/wcd9320_registers.h>
#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include "wcd9xxx-resmgr.h"

/*
 * Human-readable names for enum wcd9xxx_notify_event, indexed by event
 * value.  wcd9xxx_resmgr_init() WARNs if this table and the enum fall
 * out of sync (see the ARRAY_SIZE check there), so keep the two lists
 * in the same order.
 */
static char wcd9xxx_event_string[][64] = {
	"WCD9XXX_EVENT_INVALID",

	"WCD9XXX_EVENT_PRE_RCO_ON",
	"WCD9XXX_EVENT_POST_RCO_ON",
	"WCD9XXX_EVENT_PRE_RCO_OFF",
	"WCD9XXX_EVENT_POST_RCO_OFF",

	"WCD9XXX_EVENT_PRE_MCLK_ON",
	"WCD9XXX_EVENT_POST_MCLK_ON",
	"WCD9XXX_EVENT_PRE_MCLK_OFF",
	"WCD9XXX_EVENT_POST_MCLK_OFF",

	"WCD9XXX_EVENT_PRE_BG_OFF",
	"WCD9XXX_EVENT_POST_BG_OFF",
	"WCD9XXX_EVENT_PRE_BG_AUDIO_ON",
	"WCD9XXX_EVENT_POST_BG_AUDIO_ON",
	"WCD9XXX_EVENT_PRE_BG_MBHC_ON",
	"WCD9XXX_EVENT_POST_BG_MBHC_ON",

	"WCD9XXX_EVENT_PRE_MICBIAS_1_OFF",
	"WCD9XXX_EVENT_POST_MICBIAS_1_OFF",
	"WCD9XXX_EVENT_PRE_MICBIAS_2_OFF",
	"WCD9XXX_EVENT_POST_MICBIAS_2_OFF",
	"WCD9XXX_EVENT_PRE_MICBIAS_3_OFF",
	"WCD9XXX_EVENT_POST_MICBIAS_3_OFF",
	"WCD9XXX_EVENT_PRE_MICBIAS_4_OFF",
	"WCD9XXX_EVENT_POST_MICBIAS_4_OFF",
	"WCD9XXX_EVENT_PRE_MICBIAS_1_ON",
	"WCD9XXX_EVENT_POST_MICBIAS_1_ON",
	"WCD9XXX_EVENT_PRE_MICBIAS_2_ON",
	"WCD9XXX_EVENT_POST_MICBIAS_2_ON",
	"WCD9XXX_EVENT_PRE_MICBIAS_3_ON",
	"WCD9XXX_EVENT_POST_MICBIAS_3_ON",
	"WCD9XXX_EVENT_PRE_MICBIAS_4_ON",
	"WCD9XXX_EVENT_POST_MICBIAS_4_ON",

	"WCD9XXX_EVENT_PRE_CFILT_1_OFF",
	"WCD9XXX_EVENT_POST_CFILT_1_OFF",
	"WCD9XXX_EVENT_PRE_CFILT_2_OFF",
	"WCD9XXX_EVENT_POST_CFILT_2_OFF",
	"WCD9XXX_EVENT_PRE_CFILT_3_OFF",
	"WCD9XXX_EVENT_POST_CFILT_3_OFF",
	"WCD9XXX_EVENT_PRE_CFILT_1_ON",
	"WCD9XXX_EVENT_POST_CFILT_1_ON",
	"WCD9XXX_EVENT_PRE_CFILT_2_ON",
	"WCD9XXX_EVENT_POST_CFILT_2_ON",
	"WCD9XXX_EVENT_PRE_CFILT_3_ON",
	"WCD9XXX_EVENT_POST_CFILT_3_ON",

	"WCD9XXX_EVENT_PRE_HPHL_PA_ON",
	"WCD9XXX_EVENT_POST_HPHL_PA_OFF",
	"WCD9XXX_EVENT_PRE_HPHR_PA_ON",
	"WCD9XXX_EVENT_POST_HPHR_PA_OFF",

	"WCD9XXX_EVENT_POST_RESUME",

	"WCD9XXX_EVENT_PRE_TX_3_ON",
	"WCD9XXX_EVENT_POST_TX_3_OFF",

	"WCD9XXX_EVENT_LAST",
};

/* RCO calibration retry/delay tuning used by wcd9xxx_enable_clock_block() */
#define WCD9XXX_RCO_CALIBRATION_RETRY_COUNT 5
#define WCD9XXX_RCO_CALIBRATION_DELAY_US 5000
#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
#define WCD9XXX_RCO_CALIBRATION_DELAY_INC_US 1000

/*
 * One entry of the "conditional bit update" list: while condition @cond
 * holds (or is unavailable), bit @shift of register @reg is driven to
 * !@invert (see wcd9xxx_resmgr_cond_trigger_cond()).
 */
struct wcd9xxx_resmgr_cond_entry {
	unsigned short reg;	/* codec register to update */
	int shift;		/* bit position within @reg */
	bool invert;		/* invert the polarity of the bit */
	enum wcd9xxx_resmgr_cond cond;	/* condition this bit tracks */
	struct list_head list;	/* linked on resmgr->update_bit_cond_h */
};

static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr
						  *resmgr);
static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
				  enum wcd9xxx_clock_type type);

/* Map a notify event to its printable name (no bounds check on @type). */
const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type)
{
	return wcd9xxx_event_string[type];
}

/* Broadcast event @e to all registered resmgr notifier listeners. */
void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
				  const enum wcd9xxx_notify_event e)
{
	pr_debug("%s: notifier call event %d\n", __func__, e);
	blocking_notifier_call_chain(&resmgr->notifier, e, resmgr);
}

/* Turn off the central bandgap reference, with pre/post notifications. */
static void wcd9xxx_disable_bg(struct wcd9xxx_resmgr *resmgr)
{
	/* Notify bg mode change */
	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_OFF);
	/* Disable bg */
	snd_soc_update_bits(resmgr->codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
			    0x03, 0x00);
	usleep_range(100, 110);
	/* Notify bg mode change */
	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_OFF);
}

/*
 * BG enablement should always enable in slow mode.
 * The fast mode doesn't need to be enabled as fast mode BG is to be driven
 * by MBHC override.
 */
static void wcd9xxx_enable_bg(struct wcd9xxx_resmgr *resmgr)
{
	struct snd_soc_codec *codec = resmgr->codec;

	/* Enable BG in slow mode and precharge */
	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x80);
	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x04, 0x04);
	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x01, 0x01);
	usleep_range(1000, 1100);
	/* precharge done, drop the precharge bit */
	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x00);
}

/* Enable the bandgap for audio use, with pre/post notifications. */
static void wcd9xxx_enable_bg_audio(struct wcd9xxx_resmgr *resmgr)
{
	/* Notify bandgap mode change */
	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_AUDIO_ON);
	wcd9xxx_enable_bg(resmgr);
	/* Notify bandgap mode change */
	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_AUDIO_ON);
}

/* Enable the bandgap for MBHC (headset detection) use. */
static void wcd9xxx_enable_bg_mbhc(struct wcd9xxx_resmgr *resmgr)
{
	struct snd_soc_codec *codec = resmgr->codec;

	/* Notify bandgap mode change */
	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_MBHC_ON);

	/*
	 * mclk should be off or clk buff source shouldn't be VBG
	 * Let's turn off mclk always
	 */
	WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
	wcd9xxx_enable_bg(resmgr);

	/* Notify bandgap mode change */
	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_MBHC_ON);
}

/*
 * Shut down the active clock buffer (RCO or MCLK path chosen by the
 * current resmgr->clk_type), with pre/post notifications.  Does NOT
 * update resmgr->clk_type; callers do that.
 */
static void wcd9xxx_disable_clock_block(struct wcd9xxx_resmgr *resmgr)
{
	struct snd_soc_codec *codec = resmgr->codec;

	pr_debug("%s: enter\n", __func__);
	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);

	/* Notify */
	if (resmgr->clk_type == WCD9XXX_CLK_RCO)
		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_OFF);
	else
		wcd9xxx_resmgr_notifier_call(resmgr,
					     WCD9XXX_EVENT_PRE_MCLK_OFF);

	/* TOMTOM needs an extra 0x40 toggle on EN1 during shutdown */
	switch (resmgr->codec_type) {
	case WCD9XXX_CDC_TYPE_TOMTOM:
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
		usleep_range(50, 55);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x40);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x00);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x00);
		break;
	default:
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
		usleep_range(50, 55);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x00);
		break;
	}
	usleep_range(50, 55);

	/* Notify */
	if (resmgr->clk_type == WCD9XXX_CLK_RCO) {
		wcd9xxx_resmgr_notifier_call(resmgr,
					     WCD9XXX_EVENT_POST_RCO_OFF);
	} else {
		wcd9xxx_resmgr_notifier_call(resmgr,
					     WCD9XXX_EVENT_POST_MCLK_OFF);
	}
	pr_debug("%s: leave\n", __func__);
}

/*
 * Re-acquire @clk_users references of the codec RCO via the codec
 * specific cdc_rco_ctrl callback (used on TOMTOM after SSR).  The
 * callback is invoked with BG_CLK unlocked, hence the unlock/lock
 * bracket.
 */
static void wcd9xxx_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr *resmgr,
						int clk_users)
{
	/*
	 * Caller of this function should have acquired
	 * BG_CLK lock
	 */
	WCD9XXX_BG_CLK_UNLOCK(resmgr);
	if (clk_users) {
		if (resmgr->resmgr_cb &&
		    resmgr->resmgr_cb->cdc_rco_ctrl) {
			while (clk_users--)
				resmgr->resmgr_cb->cdc_rco_ctrl(resmgr->codec,
								true);
		}
	}
	/* Acquire BG_CLK lock before return */
	WCD9XXX_BG_CLK_LOCK(resmgr);
}

/*
 * Rebuild bandgap/clock state after subsystem restart (SSR): snapshot
 * the old reference counts, reset all bookkeeping to "off", then re-vote
 * each reference so the hardware is reprogrammed from scratch.
 */
void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr)
{
	int old_bg_audio_users, old_bg_mbhc_users;
	int old_clk_rco_users, old_clk_mclk_users;

	pr_debug("%s: enter\n", __func__);
	WCD9XXX_BG_CLK_LOCK(resmgr);
	old_bg_audio_users = resmgr->bg_audio_users;
	old_bg_mbhc_users = resmgr->bg_mbhc_users;
	old_clk_rco_users = resmgr->clk_rco_users;
	old_clk_mclk_users = resmgr->clk_mclk_users;
	resmgr->bg_audio_users = 0;
	resmgr->bg_mbhc_users = 0;
	resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
	resmgr->clk_rco_users = 0;
	resmgr->clk_mclk_users = 0;
	resmgr->clk_type = WCD9XXX_CLK_OFF;

	if (old_bg_audio_users) {
		while (old_bg_audio_users--)
			wcd9xxx_resmgr_get_bandgap(resmgr,
						   WCD9XXX_BANDGAP_AUDIO_MODE);
	}
	if (old_bg_mbhc_users) {
		while (old_bg_mbhc_users--)
			wcd9xxx_resmgr_get_bandgap(resmgr,
						   WCD9XXX_BANDGAP_MBHC_MODE);
	}
	if (old_clk_mclk_users) {
		while (old_clk_mclk_users--)
			wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_MCLK);
	}
	/* TOMTOM re-votes RCO through the codec callback instead */
	if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
		wcd9xxx_resmgr_cdc_specific_get_clk(resmgr, old_clk_rco_users);
	} else if (old_clk_rco_users) {
		while (old_clk_rco_users--)
			wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_RCO);
	}
	WCD9XXX_BG_CLK_UNLOCK(resmgr);
	pr_debug("%s: leave\n", __func__);
}

/*
 * wcd9xxx_resmgr_get_bandgap : Vote for bandgap ref
 * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
 *
 * Caller must hold the BG_CLK lock.  Audio mode takes precedence over
 * MBHC mode: the first audio vote while MBHC holds the bandgap switches
 * the hardware from MBHC mode to audio mode.
 */
void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
				const enum wcd9xxx_bandgap_type choice)
{
	enum wcd9xxx_clock_type clock_save = WCD9XXX_CLK_OFF;

	pr_debug("%s: enter, wants %d\n", __func__, choice);

	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
	switch (choice) {
	case WCD9XXX_BANDGAP_AUDIO_MODE:
		resmgr->bg_audio_users++;
		if (resmgr->bg_audio_users == 1 &&
		    resmgr->bg_mbhc_users) {
			/*
			 * Current bg is MBHC mode, about to switch to
			 * audio mode.
			 */
			WARN_ON(resmgr->bandgap_type !=
				WCD9XXX_BANDGAP_MBHC_MODE);

			/* BG mode can be changed only with clock off */
			if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
				clock_save = wcd9xxx_save_clock(resmgr);
			/* Switch BG mode */
			wcd9xxx_disable_bg(resmgr);
			wcd9xxx_enable_bg_audio(resmgr);
			/* restore clock */
			if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
				wcd9xxx_restore_clock(resmgr, clock_save);
		} else if (resmgr->bg_audio_users == 1) {
			/* currently off, just enable it */
			WARN_ON(resmgr->bandgap_type != WCD9XXX_BANDGAP_OFF);
			wcd9xxx_enable_bg_audio(resmgr);
		}
		resmgr->bandgap_type = WCD9XXX_BANDGAP_AUDIO_MODE;
		break;
	case WCD9XXX_BANDGAP_MBHC_MODE:
		resmgr->bg_mbhc_users++;
		/*
		 * If the bandgap is already up in either mode, only the
		 * reference count changes; audio mode also serves MBHC.
		 */
		if (resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE ||
		    resmgr->bandgap_type == WCD9XXX_BANDGAP_AUDIO_MODE)
			/* do nothing */
			break;

		/* bg mode can be changed only with clock off */
		clock_save = wcd9xxx_save_clock(resmgr);
		/* enable bg with MBHC mode */
		wcd9xxx_enable_bg_mbhc(resmgr);
		/* restore clock */
		wcd9xxx_restore_clock(resmgr, clock_save);
		/* save current mode */
		resmgr->bandgap_type = WCD9XXX_BANDGAP_MBHC_MODE;
		break;
	default:
		pr_err("%s: Error, Invalid bandgap settings\n", __func__);
		break;
	}
	pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
		 resmgr->bg_audio_users, resmgr->bg_mbhc_users);
}

/*
 * wcd9xxx_resmgr_put_bandgap : Unvote bandgap ref that has been voted
 * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
 *
 * Caller must hold the BG_CLK lock.  Dropping the last audio vote falls
 * back to MBHC mode if MBHC still holds a vote, otherwise powers the
 * bandgap off.
 */
void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
				enum wcd9xxx_bandgap_type choice)
{
	enum wcd9xxx_clock_type clock_save;

	pr_debug("%s: enter choice %d\n", __func__, choice);

	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
	switch (choice) {
	case WCD9XXX_BANDGAP_AUDIO_MODE:
		if (--resmgr->bg_audio_users == 0) {
			if (resmgr->bg_mbhc_users) {
				/* bg mode can be changed only with clock off */
				clock_save = wcd9xxx_save_clock(resmgr);
				/* switch to MBHC mode */
				wcd9xxx_enable_bg_mbhc(resmgr);
				/* restore clock */
				wcd9xxx_restore_clock(resmgr, clock_save);
				resmgr->bandgap_type =
				    WCD9XXX_BANDGAP_MBHC_MODE;
			} else {
				/* turn off */
				wcd9xxx_disable_bg(resmgr);
				resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
			}
		}
		break;
	case WCD9XXX_BANDGAP_MBHC_MODE:
		WARN(resmgr->bandgap_type == WCD9XXX_BANDGAP_OFF,
		     "Unexpected bandgap type %d\n", resmgr->bandgap_type);
		/* only power off if audio mode isn't still holding the bg */
		if (--resmgr->bg_mbhc_users == 0 &&
		    resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE) {
			wcd9xxx_disable_bg(resmgr);
			resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
		}
		break;
	default:
		pr_err("%s: Error, Invalid bandgap settings\n", __func__);
		break;
	}
	pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
		 resmgr->bg_audio_users, resmgr->bg_mbhc_users);
}

/*
 * Reference-counted control of the RX bias: the first voter switches it
 * on, the last voter switches it off.  NOTE(review): no lock is taken
 * here — presumably callers serialize via DAPM/BCL; verify before
 * calling from a new context.
 */
void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable)
{
	struct snd_soc_codec *codec = resmgr->codec;

	if (enable) {
		resmgr->rx_bias_count++;
		if (resmgr->rx_bias_count == 1)
			snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
					    0x80, 0x80);
	} else {
		resmgr->rx_bias_count--;
		if (!resmgr->rx_bias_count)
			snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
					    0x80, 0x00);
	}
}

/*
 * Enable/disable the RC oscillator ("config mode") used as the internal
 * clock source.  Always returns 0.
 */
int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr,
				      int enable)
{
	struct snd_soc_codec *codec = resmgr->codec;

	pr_debug("%s: enable = %d\n", __func__, enable);
	if (enable) {
		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x10, 0);
		/* bandgap mode to fast */
		snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x17);
		usleep_range(5, 10);
		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x80);
		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x80);
		usleep_range(10, 20);
		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0);
		usleep_range(10000, 10100);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x08, 0x08);
	} else {
		snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0);
		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0);
	}

	return 0;
}

/*
 * Bring up the clock buffer in one of three configurations:
 *  - WCD9XXX_CFG_RCO:     enable the RC oscillator and switch to it
 *  - WCD9XXX_CFG_CAL_RCO: (TOMTOM) power and calibrate the RCO from
 *                         qfuse trim data, without switching the mux
 *  - otherwise (MCLK):    switch the clock buffer to the external MCLK
 * Pre/post notifier events are sent for the RCO and MCLK cases only.
 */
static void wcd9xxx_enable_clock_block(struct wcd9xxx_resmgr *resmgr,
				enum wcd9xxx_clock_config_mode config_mode)
{
	struct snd_soc_codec *codec = resmgr->codec;
	unsigned long delay = WCD9XXX_RCO_CALIBRATION_DELAY_US;
	int num_retry = 0;
	unsigned int valr;
	unsigned int valr1;
	/* qfuse trim value -> RCO_CTRL[6:5] programming lookup */
	unsigned int valw[] = {0x01, 0x01, 0x10, 0x00};

	pr_debug("%s: config_mode = %d\n", __func__, config_mode);

	/* transit to RCO requires mclk off */
	if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
		WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) &
			(1 << 2));

	if (config_mode == WCD9XXX_CFG_RCO) {
		/* Notify */
		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_ON);
		/* enable RCO and switch to it */
		wcd9xxx_resmgr_enable_config_mode(resmgr, 1);
		snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
		usleep_range(1000, 1100);
	} else if (config_mode == WCD9XXX_CFG_CAL_RCO) {
		snd_soc_update_bits(codec, TOMTOM_A_BIAS_OSC_BG_CTL,
				    0x01, 0x01);
		/* 1ms sleep required after BG enabled */
		usleep_range(1000, 1100);
		snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x18, 0x10);
		/* combine two qfuse bits into an index for valw[] */
		valr = snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT0) & (0x04);
		valr1 = snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) & (0x08);
		valr = (valr >> 1) | (valr1 >> 3);
		snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x60,
				    valw[valr] << 5);
		snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x80, 0x80);

		/* pulse the calibration trigger until the busy bit clears */
		do {
			snd_soc_update_bits(codec,
					    TOMTOM_A_RCO_CALIBRATION_CTRL1,
					    0x80, 0x80);
			snd_soc_update_bits(codec,
					    TOMTOM_A_RCO_CALIBRATION_CTRL1,
					    0x80, 0x00);
			/* RCO calibration takes approx. 5ms */
			usleep_range(delay, delay +
					    WCD9XXX_USLEEP_RANGE_MARGIN_US);
			if (!(snd_soc_read(codec,
				TOMTOM_A_RCO_CALIBRATION_RESULT1) & 0x10))
				break;
			/* after 3 failed tries, start waiting a bit longer */
			if (num_retry >= 3) {
				delay = delay +
					WCD9XXX_RCO_CALIBRATION_DELAY_INC_US;
			}
		} while (num_retry++ < WCD9XXX_RCO_CALIBRATION_RETRY_COUNT);
	} else {
		/* Notify */
		wcd9xxx_resmgr_notifier_call(resmgr,
					     WCD9XXX_EVENT_PRE_MCLK_ON);
		/* switch to MCLK */
		switch (resmgr->codec_type) {
		case WCD9XXX_CDC_TYPE_TOMTOM:
			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
					    0x08, 0x00);
			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
					    0x40, 0x40);
			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
					    0x40, 0x00);
			/* clk source to ext clk and clk buff ref to VBG */
			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
					    0x0C, 0x04);
			break;
		default:
			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
					    0x08, 0x00);
			/* if RCO is enabled, switch from it */
			if (snd_soc_read(codec, WCD9XXX_A_RC_OSC_FREQ) &
			    0x80) {
				snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2,
					      0x02);
				wcd9xxx_resmgr_enable_config_mode(resmgr, 0);
			}
			/* clk source to ext clk and clk buff ref to VBG */
			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
					    0x0C, 0x04);
			break;
		}
	}

	/* calibration-only mode leaves the clock buffer alone */
	if (config_mode != WCD9XXX_CFG_CAL_RCO) {
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
				    0x01, 0x01);
		/*
		 * sleep required by codec hardware to
		 * enable clock buffer
		 */
		usleep_range(1000, 1200);
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
				    0x02, 0x00);
		/* on MCLK */
		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
				    0x04, 0x04);
		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL,
				    0x01, 0x01);
	}
	usleep_range(50, 55);

	/* Notify */
	if (config_mode == WCD9XXX_CFG_RCO)
		wcd9xxx_resmgr_notifier_call(resmgr,
					     WCD9XXX_EVENT_POST_RCO_ON);
	else if (config_mode == WCD9XXX_CFG_MCLK)
		wcd9xxx_resmgr_notifier_call(resmgr,
					     WCD9XXX_EVENT_POST_MCLK_ON);
}

/*
 * disable clock and return previous clock state
 *
 * NOTE(review): the return expression is a 0/1 boolean cast to
 * enum wcd9xxx_clock_type, so a previously-active MCLK is reported the
 * same as RCO; wcd9xxx_restore_clock() then rebuilds the clock from
 * that value.  Long-standing upstream behavior — confirm against the
 * enum ordering in wcd9xxx-resmgr.h before "fixing".
 */
static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr
						  *resmgr)
{
	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
	if (resmgr->clk_type != WCD9XXX_CLK_OFF)
		wcd9xxx_disable_clock_block(resmgr);
	/* clk_type is untouched by the disable, so this reflects the
	 * state prior to this call */
	return resmgr->clk_type != WCD9XXX_CLK_OFF;
}

/* Counterpart of wcd9xxx_save_clock(): re-enable the saved clock type. */
static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
				  enum wcd9xxx_clock_type type)
{
	if (type != WCD9XXX_CLK_OFF)
		wcd9xxx_enable_clock_block(resmgr, type == WCD9XXX_CLK_RCO);
}

/*
 * Vote for a clock source (RCO or MCLK).  Caller must hold the BG_CLK
 * lock.  The first vote of a type may switch the hardware clock mux;
 * subsequent votes only bump the reference count.
 */
void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
				  enum wcd9xxx_clock_type type)
{
	struct snd_soc_codec *codec = resmgr->codec;

	pr_debug("%s: current %d, requested %d, rco_users %d, mclk_users %d\n",
		 __func__, resmgr->clk_type, type,
		 resmgr->clk_rco_users, resmgr->clk_mclk_users);
	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);

	switch (type) {
	case WCD9XXX_CLK_RCO:
		if (++resmgr->clk_rco_users == 1 &&
		    resmgr->clk_type == WCD9XXX_CLK_OFF) {
			/* enable RCO and switch to it */
			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_RCO);
			resmgr->clk_type = WCD9XXX_CLK_RCO;
		} else if (resmgr->clk_rco_users == 1 &&
			   resmgr->clk_type == WCD9XXX_CLK_MCLK &&
			   resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
			/*
			 * Enable RCO but do not switch CLK MUX to RCO
			 * unless ext_clk_users is 1, which indicates
			 * EXT CLK is enabled for RCO calibration
			 */
			wcd9xxx_enable_clock_block(resmgr,
						   WCD9XXX_CFG_CAL_RCO);
			if (resmgr->ext_clk_users == 1) {
				/* Notify */
				wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_PRE_RCO_ON);
				/* CLK MUX to RCO */
				snd_soc_update_bits(codec,
						    WCD9XXX_A_CLK_BUFF_EN1,
						    0x08, 0x08);
				resmgr->clk_type = WCD9XXX_CLK_RCO;
				wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_POST_RCO_ON);
			}
		}
		break;
	case WCD9XXX_CLK_MCLK:
		if (++resmgr->clk_mclk_users == 1 &&
		    resmgr->clk_type == WCD9XXX_CLK_OFF) {
			/* switch to MCLK */
			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_MCLK);
			resmgr->clk_type = WCD9XXX_CLK_MCLK;
		} else if (resmgr->clk_mclk_users == 1 &&
			   resmgr->clk_type == WCD9XXX_CLK_RCO) {
			/* RCO to MCLK switch, with RCO still powered on */
			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
				wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_PRE_MCLK_ON);
				snd_soc_update_bits(codec,
						WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
						0x40, 0x00);
				/* Enable clock buffer */
				snd_soc_update_bits(codec,
						    WCD9XXX_A_CLK_BUFF_EN1,
						    0x01, 0x01);
				snd_soc_update_bits(codec,
						    WCD9XXX_A_CLK_BUFF_EN1,
						    0x08, 0x00);
				wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_POST_MCLK_ON);
			} else {
				/* if RCO is enabled, switch from it */
				WARN_ON(!(snd_soc_read(resmgr->codec,
					WCD9XXX_A_RC_OSC_FREQ) & 0x80));
				/* disable clock block */
				wcd9xxx_disable_clock_block(resmgr);
				/* switch to MCLK */
				wcd9xxx_enable_clock_block(resmgr,
							   WCD9XXX_CFG_MCLK);
			}
			resmgr->clk_type = WCD9XXX_CLK_MCLK;
		}
		break;
	default:
		pr_err("%s: Error, Invalid clock get request %d\n", __func__,
		       type);
		break;
	}
	pr_debug("%s: leave\n", __func__);
}

/*
 * Drop a clock-source vote.  Caller must hold the BG_CLK lock.  Dropping
 * the last MCLK vote while RCO votes remain falls back to the RCO;
 * dropping the last vote overall turns the clock block off.
 */
void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
				  enum wcd9xxx_clock_type type)
{
	struct snd_soc_codec *codec = resmgr->codec;

	pr_debug("%s: current %d, put %d\n", __func__, resmgr->clk_type, type);

	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
	switch (type) {
	case WCD9XXX_CLK_RCO:
		if (--resmgr->clk_rco_users == 0 &&
		    resmgr->clk_type == WCD9XXX_CLK_RCO) {
			wcd9xxx_disable_clock_block(resmgr);
			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
				/* Powerdown RCO */
				snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
						    0x80, 0x00);
				snd_soc_update_bits(codec,
						    TOMTOM_A_BIAS_OSC_BG_CTL,
						    0x01, 0x00);
			} else {
				/* if RCO is enabled, switch from it */
				if (snd_soc_read(resmgr->codec,
						 WCD9XXX_A_RC_OSC_FREQ)
						 & 0x80) {
					snd_soc_write(resmgr->codec,
						WCD9XXX_A_CLK_BUFF_EN2, 0x02);
					wcd9xxx_resmgr_enable_config_mode(
								resmgr, 0);
				}
			}
			resmgr->clk_type = WCD9XXX_CLK_OFF;
		}
		break;
	case WCD9XXX_CLK_MCLK:
		if (--resmgr->clk_mclk_users == 0 &&
		    resmgr->clk_rco_users == 0) {
			wcd9xxx_disable_clock_block(resmgr);

			if ((resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) &&
			    (snd_soc_read(codec, TOMTOM_A_RCO_CTRL) & 0x80)) {
				/* powerdown RCO */
				snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
						    0x80, 0x00);
				snd_soc_update_bits(codec,
						    TOMTOM_A_BIAS_OSC_BG_CTL,
						    0x01, 0x00);
			}
			resmgr->clk_type = WCD9XXX_CLK_OFF;
		} else if (resmgr->clk_mclk_users == 0 &&
			   resmgr->clk_rco_users) {
			/* MCLK gone but RCO still voted: fall back to RCO */
			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
				if (!(snd_soc_read(codec, TOMTOM_A_RCO_CTRL) &
				      0x80)) {
					dev_dbg(codec->dev,
						"%s: Enabling RCO\n",
						__func__);
					wcd9xxx_enable_clock_block(resmgr,
							WCD9XXX_CFG_CAL_RCO);
					snd_soc_update_bits(codec,
							WCD9XXX_A_CLK_BUFF_EN1,
							0x01, 0x00);
				} else {
					wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_PRE_MCLK_OFF);
					snd_soc_update_bits(codec,
							WCD9XXX_A_CLK_BUFF_EN1,
							0x08, 0x08);
					snd_soc_update_bits(codec,
							WCD9XXX_A_CLK_BUFF_EN1,
							0x01, 0x00);
					wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_POST_MCLK_OFF);
					/*
					 * CLK Mux changed to RCO, notify that
					 * RCO is ON
					 */
					wcd9xxx_resmgr_notifier_call(resmgr,
						WCD9XXX_EVENT_POST_RCO_ON);
				}
			} else {
				/* disable clock */
				wcd9xxx_disable_clock_block(resmgr);
				/* switch to RCO */
				wcd9xxx_enable_clock_block(resmgr,
							   WCD9XXX_CFG_RCO);
			}
			resmgr->clk_type = WCD9XXX_CLK_RCO;
		}
		break;
	default:
		pr_err("%s: Error, Invalid clock get request %d\n", __func__,
		       type);
		break;
	}
	WARN_ON(resmgr->clk_rco_users < 0);
	WARN_ON(resmgr->clk_mclk_users < 0);
	pr_debug("%s: new rco_users %d, mclk_users %d\n",
		 __func__, resmgr->clk_rco_users, resmgr->clk_mclk_users);
}

/*
 * wcd9xxx_resmgr_get_clk_type()
 * Returns clk type that is currently enabled
 */
int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr)
{
	return resmgr->clk_type;
}

/*
 * Reference-counted enable/disable of one CFILT block, emitting the
 * matching PRE_*_ON / POST_*_OFF notifier events on the 0->1 and 1->0
 * transitions.
 */
static void wcd9xxx_resmgr_update_cfilt_usage(struct wcd9xxx_resmgr *resmgr,
					      enum wcd9xxx_cfilt_sel cfilt_sel,
					      bool inc)
{
	u16 micb_cfilt_reg;
	enum wcd9xxx_notify_event e_pre_on, e_post_off;
	struct snd_soc_codec *codec = resmgr->codec;

	switch (cfilt_sel) {
	case WCD9XXX_CFILT1_SEL:
		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_1_CTL;
		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_1_ON;
		e_post_off = WCD9XXX_EVENT_POST_CFILT_1_OFF;
		break;
	case WCD9XXX_CFILT2_SEL:
		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_2_CTL;
		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_2_ON;
		e_post_off = WCD9XXX_EVENT_POST_CFILT_2_OFF;
		break;
	case WCD9XXX_CFILT3_SEL:
		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_3_CTL;
		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_3_ON;
		e_post_off = WCD9XXX_EVENT_POST_CFILT_3_OFF;
		break;
	default:
		WARN(1, "Invalid CFILT selection %d\n", cfilt_sel);
		return; /* should not happen */
	}

	if (inc) {
		if ((resmgr->cfilt_users[cfilt_sel]++) == 0) {
			/* Notify */
			wcd9xxx_resmgr_notifier_call(resmgr, e_pre_on);
			/* Enable CFILT */
			snd_soc_update_bits(codec, micb_cfilt_reg,
					    0x80, 0x80);
		}
	} else {
		/*
		 * Check if count not zero, decrease
		 * then check if zero, go ahead disable cfilter
		 */
		WARN(resmgr->cfilt_users[cfilt_sel] == 0,
		     "Invalid CFILT use count 0\n");
		if ((--resmgr->cfilt_users[cfilt_sel]) == 0) {
			/* Disable CFILT */
			snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0);
			/* Notify MBHC so MBHC can switch CFILT to fast mode */
			wcd9xxx_resmgr_notifier_call(resmgr, e_post_off);
		}
	}
}

/* Vote for a CFILT block (see wcd9xxx_resmgr_update_cfilt_usage). */
void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
			      enum wcd9xxx_cfilt_sel cfilt_sel)
{
	return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, true);
}

/* Drop a CFILT block vote (see wcd9xxx_resmgr_update_cfilt_usage). */
void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
			      enum wcd9xxx_cfilt_sel cfilt_sel)
{
	return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, false);
}

/*
 * Compute the CFILT "K" divider code for the requested millivolt level,
 * given the platform's LDOH voltage.  Returns the K value on success or
 * -EINVAL if the LDOH setting is unknown or @cfilt_mv is out of range
 * for it.
 */
int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
			     unsigned int cfilt_mv)
{
	int rc = -EINVAL;
	unsigned int ldoh_v = resmgr->micbias_pdata->ldoh_v;
	unsigned min_mv, max_mv;

	switch (ldoh_v) {
	case WCD9XXX_LDOH_1P95_V:
		min_mv = 160;
		max_mv = 1800;
		break;
	case WCD9XXX_LDOH_2P35_V:
		min_mv = 200;
		max_mv = 2200;
		break;
	case WCD9XXX_LDOH_2P75_V:
		min_mv = 240;
		max_mv = 2600;
		break;
	case WCD9XXX_LDOH_3P0_V:
		min_mv = 260;
		max_mv = 2875;
		break;
	default:
		goto done;
	}

	if (cfilt_mv < min_mv || cfilt_mv > max_mv)
		goto done;

	/* find the smallest code whose output >= cfilt_mv; codes are
	 * biased by 4 */
	for (rc = 4; rc <= 44; rc++) {
		min_mv = max_mv * (rc) / 44;
		if (min_mv >= cfilt_mv) {
			rc -= 4;
			break;
		}
	}
done:
	return rc;
}

/*
 * Re-evaluate condition @cond and push its current state to every
 * register bit registered against it.  A condition that has not been
 * registered as available is treated as "set".  Caller must hold
 * update_bit_cond_lock (all callers in this file do).
 */
static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
					     enum wcd9xxx_resmgr_cond cond)
{
	struct list_head *l;
	struct wcd9xxx_resmgr_cond_entry *e;
	bool set;

	pr_debug("%s: enter\n", __func__);
	/* update bit if cond isn't available or cond is set */
	set = !test_bit(cond, &resmgr->cond_avail_flags) ||
	      !!test_bit(cond, &resmgr->cond_flags);
	list_for_each(l, &resmgr->update_bit_cond_h) {
		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
		if (e->cond == cond)
			snd_soc_update_bits(resmgr->codec, e->reg,
					    1 << e->shift,
					    (set ? !e->invert : e->invert) <<
					    e->shift);
	}
	pr_debug("%s: leave\n", __func__);
}

/*
 * wcd9xxx_regmgr_cond_register : notify resmgr conditions in the condbits
 * are available and notified.
 * condbits : contains bitmask of enum wcd9xxx_resmgr_cond
 */
void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
				  unsigned long condbits)
{
	unsigned int cond;

	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
		mutex_lock(&resmgr->update_bit_cond_lock);
		WARN(test_bit(cond, &resmgr->cond_avail_flags),
		     "Condition 0x%0x is already registered\n", cond);
		set_bit(cond, &resmgr->cond_avail_flags);
		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
		mutex_unlock(&resmgr->update_bit_cond_lock);
		pr_debug("%s: Condition 0x%x is registered\n", __func__,
			 cond);
	}
}

/* Reverse of wcd9xxx_regmgr_cond_register(): mark conditions unavailable. */
void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
				    unsigned long condbits)
{
	unsigned int cond;

	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
		mutex_lock(&resmgr->update_bit_cond_lock);
		WARN(!test_bit(cond, &resmgr->cond_avail_flags),
		     "Condition 0x%0x isn't registered\n", cond);
		clear_bit(cond, &resmgr->cond_avail_flags);
		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
		mutex_unlock(&resmgr->update_bit_cond_lock);
		pr_debug("%s: Condition 0x%x is deregistered\n", __func__,
			 cond);
	}
}

/*
 * Set or clear condition @cond; registered bits are only re-driven when
 * the state actually changes.
 */
void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
				     enum wcd9xxx_resmgr_cond cond, bool set)
{
	mutex_lock(&resmgr->update_bit_cond_lock);
	if ((set && !test_and_set_bit(cond, &resmgr->cond_flags)) ||
	    (!set && test_and_clear_bit(cond, &resmgr->cond_flags))) {
		pr_debug("%s: Resource %d condition changed to %s\n",
			 __func__, cond, set ? "set" : "clear");
		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
	}
	mutex_unlock(&resmgr->update_bit_cond_lock);
}

/*
 * Register register-bit (@reg, @shift, @invert) to track condition
 * @cond; the bit is driven to its correct state immediately.  Returns 0
 * or -ENOMEM.  The entry is freed by
 * wcd9xxx_resmgr_rm_cond_update_bits().
 */
int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
					enum wcd9xxx_resmgr_cond cond,
					unsigned short reg, int shift,
					bool invert)
{
	struct wcd9xxx_resmgr_cond_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cond = cond;
	entry->reg = reg;
	entry->shift = shift;
	entry->invert = invert;

	mutex_lock(&resmgr->update_bit_cond_lock);
	list_add_tail(&entry->list, &resmgr->update_bit_cond_h);

	wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
	mutex_unlock(&resmgr->update_bit_cond_lock);

	return 0;
}

/*
 * wcd9xxx_resmgr_rm_cond_update_bits :
 * Clear bit and remove from the conditional bit update list
 *
 * NOTE(review): @cond is accepted but never compared against the list
 * entries — matching is by reg/shift/invert only.  Confirm whether the
 * parameter is intentionally unused before relying on it.
 */
int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
				       enum wcd9xxx_resmgr_cond cond,
				       unsigned short reg, int shift,
				       bool invert)
{
	struct list_head *l, *next;
	struct wcd9xxx_resmgr_cond_entry *e = NULL;

	pr_debug("%s: enter\n", __func__);
	mutex_lock(&resmgr->update_bit_cond_lock);
	list_for_each_safe(l, next, &resmgr->update_bit_cond_h) {
		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
		if (e->reg == reg && e->shift == shift &&
		    e->invert == invert) {
			/* park the bit at its inactive level before removal */
			snd_soc_update_bits(resmgr->codec, e->reg,
					    1 << e->shift,
					    e->invert << e->shift);
			list_del(&e->list);
			mutex_unlock(&resmgr->update_bit_cond_lock);
			kfree(e);
			return 0;
		}
	}
	mutex_unlock(&resmgr->update_bit_cond_lock);
	pr_err("%s: Cannot find update bit entry reg 0x%x, shift %d\n",
	       __func__, e ? e->reg : 0, e ? e->shift : 0);

	return -EINVAL;
}

/* Subscribe @nblock to resmgr notifier events. */
int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&resmgr->notifier, nblock);
}

/* Unsubscribe @nblock from resmgr notifier events. */
int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(&resmgr->notifier, nblock);
}

/*
 * Initialize the resource manager state: zeroed votes, bandgap off,
 * notifier head, condition list and locks.  Always returns 0.
 * Counterpart: wcd9xxx_resmgr_deinit().
 */
int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
			struct snd_soc_codec *codec,
			struct wcd9xxx_core_resource *core_res,
			struct wcd9xxx_pdata *pdata,
			struct wcd9xxx_micbias_setting *micbias_pdata,
			struct wcd9xxx_reg_address *reg_addr,
			const struct wcd9xxx_resmgr_cb *resmgr_cb,
			enum wcd9xxx_cdc_type cdc_type)
{
	/* keep wcd9xxx_event_string[] and the event enum in sync */
	WARN(ARRAY_SIZE(wcd9xxx_event_string) != WCD9XXX_EVENT_LAST + 1,
	     "Event string table isn't up to date!, %zd != %d\n",
	     ARRAY_SIZE(wcd9xxx_event_string), WCD9XXX_EVENT_LAST + 1);

	resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
	resmgr->codec = codec;
	resmgr->codec_type = cdc_type;
	/* This gives access of core handle to lock/unlock suspend */
	resmgr->core_res = core_res;
	resmgr->pdata = pdata;
	resmgr->micbias_pdata = micbias_pdata;
	resmgr->reg_addr = reg_addr;
	resmgr->resmgr_cb = resmgr_cb;

	INIT_LIST_HEAD(&resmgr->update_bit_cond_h);

	BLOCKING_INIT_NOTIFIER_HEAD(&resmgr->notifier);

	mutex_init(&resmgr->codec_resource_lock);
	mutex_init(&resmgr->codec_bg_clk_lock);
	mutex_init(&resmgr->update_bit_cond_lock);

	return 0;
}

/* Tear down the locks created by wcd9xxx_resmgr_init(). */
void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr)
{
	mutex_destroy(&resmgr->update_bit_cond_lock);
	mutex_destroy(&resmgr->codec_bg_clk_lock);
	mutex_destroy(&resmgr->codec_resource_lock);
}

/* Acquire the codec resource (BCL) lock. */
void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr)
{
	mutex_lock(&resmgr->codec_resource_lock);
}

/* Release the codec resource (BCL) lock. */
void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr)
{
	mutex_unlock(&resmgr->codec_resource_lock);
}

MODULE_DESCRIPTION("wcd9xxx resmgr module");
MODULE_LICENSE("GPL v2");
gpl-2.0
londbell/ZTE_U988S_JellyBean-4.2.2-Kernel-3.4.35
drivers/video/tegra/host/gr3d/gr3d_t114.c
50
15116
/*
 * drivers/video/tegra/host/t20/3dctx_t114.c
 *
 * Tegra Graphics Host 3d hardware context
 *
 * Copyright (c) 2011-2012 NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "nvhost_hwctx.h"
#include "nvhost_channel.h"
#include "dev.h"
#include "host1x/host1x02_hardware.h"
#include "gr3d.h"
#include "chip_support.h"
#include "nvhost_memmgr.h"
#include "scale3d_actmon.h"
#include "nvhost_job.h"
#include "nvhost_acm.h"
#include "class_ids.h"

#include <linux/slab.h>
#include <linux/scatterlist.h>

/* 3D registers saved/restored once per pixel pipe */
static const struct hwctx_reginfo ctxsave_regs_3d_per_pipe[] = {
	HWCTX_REGINFO(0xc30, 1, DIRECT),
	HWCTX_REGINFO(0xc40, 1, DIRECT),
	HWCTX_REGINFO(0xc50, 1, DIRECT)
};

/*
 * 3D registers saved/restored once globally.  Ordering is hardware
 * errata driven — see the bug-number comments below before reordering
 * any entry.
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	/* bug 962360. Reg 0xe44 has to be the first one to be restored*/
	HWCTX_REGINFO_RST(0x40e, 1, DIRECT, 0xe44),
	HWCTX_REGINFO(0xe00, 35, DIRECT),
	HWCTX_REGINFO(0xe25, 2, DIRECT),
	HWCTX_REGINFO(0xe28, 2, DIRECT),
	HWCTX_REGINFO(0x001, 2, DIRECT),
	HWCTX_REGINFO(0x00c, 10, DIRECT),
	HWCTX_REGINFO(0x100, 34, DIRECT),
	HWCTX_REGINFO(0x124, 2, DIRECT),
	HWCTX_REGINFO(0x200, 5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1120, INDIRECT),
	HWCTX_REGINFO(0x209, 1, DIRECT),
	HWCTX_REGINFO(0x20b, 1, DIRECT),
	HWCTX_REGINFO(0x300, 64, DIRECT),
	HWCTX_REGINFO(0x343, 25, DIRECT),
	HWCTX_REGINFO(0x363, 2, DIRECT),
	HWCTX_REGINFO(0x400, 3, DIRECT),
	/* bug 976976 requires reg 0x403 to be restored before reg 0xe45 */
	/* bug 972588 requires reg 0x403 to be restored with reg 0x411's
	   value */
	HWCTX_REGINFO_RST(0x411, 1, DIRECT, 0x403),
	HWCTX_REGINFO(0x404, 15, DIRECT),
	/* bug 955371 requires reg 0x7e0 to be restored with 0x410,s value.
	   bug 982750 requires reg 0x7e0 to be restored before 0x804.
	   note: 0x803 is the offset reg for 0x804 */
	HWCTX_REGINFO_RST(0x410, 1, DIRECT, 0x7e0),
	HWCTX_REGINFO(0x414, 7, DIRECT),
	HWCTX_REGINFO(0x434, 1, DIRECT),
	HWCTX_REGINFO(0x500, 4, DIRECT),
	HWCTX_REGINFO(0x520, 32, DIRECT),
	HWCTX_REGINFO(0x540, 64, INDIRECT),
	HWCTX_REGINFO(0x545, 1, DIRECT),
	HWCTX_REGINFO(0x547, 1, DIRECT),
	HWCTX_REGINFO(0x548, 64, INDIRECT),
	/* bug 951938 requires that reg 601 should not be the last reg to
	   be saved */
	HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x603, 128, INDIRECT),
	HWCTX_REGINFO(0x608, 4, DIRECT),
	HWCTX_REGINFO(0x60e, 1, DIRECT),
	HWCTX_REGINFO(0x700, 64, INDIRECT),
	HWCTX_REGINFO(0x710, 50, DIRECT),
	HWCTX_REGINFO(0x750, 16, DIRECT),
	HWCTX_REGINFO(0x770, 48, DIRECT),
	HWCTX_REGINFO(0x7e0, 1, DIRECT),
	HWCTX_REGINFO(0x800, 64, INDIRECT),
	/* bug 982750 requires 0x804 to be restored after reg 0x7e0 */
	HWCTX_REGINFO(0x803, 1024, INDIRECT),
	HWCTX_REGINFO(0x805, 64, INDIRECT),
	HWCTX_REGINFO(0x807, 1, DIRECT),
	HWCTX_REGINFO(0x820, 32, DIRECT),
	HWCTX_REGINFO(0x900, 64, INDIRECT),
	HWCTX_REGINFO(0x902, 2, DIRECT),
	HWCTX_REGINFO(0x907, 1, DIRECT),
	HWCTX_REGINFO(0x90a, 1, DIRECT),
	HWCTX_REGINFO(0xa02, 10, DIRECT),
	HWCTX_REGINFO(0xe2a, 1, DIRECT),
	/* bug 976976 requires reg 0xe45 to be restored after reg 0x403 */
	/* bug 972588 requires reg 0x403 to be restored with reg 0x411's
	   value */
	HWCTX_REGINFO_RST(0x411, 1, DIRECT, 0xe45),
	HWCTX_REGINFO(0xe50, 49, DIRECT),
	/* bug 930456 requires reg 0xe2b to be restored with 0x126's value */
	HWCTX_REGINFO_RST(0x126, 1, DIRECT, 0xe2b),
};

/* opcode-word sizes of the save-side fragments (each embeds its restore
 * counterpart, hence the RESTORE_* terms) */
#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
#define SAVE_END_V1_SIZE (8 + RESTORE_END_SIZE)
#define SAVE_INCRS 3

/* opcode-word sizes of the restore-side fragments */
#define RESTORE_BEGIN_SIZE 4
#define RESTORE_DIRECT_SIZE 1
#define RESTORE_INDIRECT_SIZE 2
#define RESTORE_END_SIZE 1

#ifdef CONFIG_TEGRA_FPGA_PLATFORM
#define NUM_3D_PIXEL_PIPES 2
#else
#define NUM_3D_PIXEL_PIPES 4
#endif

/* Cursor/accounting state threaded through the save-buffer builders. */
struct save_info {
	u32 *ptr;			/* next free word in the save buffer */
	unsigned int save_count;	/* words emitted into the save stream */
	unsigned int restore_count;	/* words the restore stream will hold */
	unsigned int save_incrs;	/* syncpt increments in the save stream */
	unsigned int restore_incrs;	/* syncpt increments in the restore
					 * stream */
};

/*** save ***/

/*
 * Push the context-save command sequence for channel @nctx onto @cdma:
 * wait for 3D idle, invalidate the FDC, point the memory-output engine
 * at the context's restore buffer, then gather the prebuilt save buffer.
 */
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			nvhost_opcode_imm_incr_syncpt(
				host1x_uclass_incr_syncpt_cond_op_done_v(),
				p->syncpt));
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_wait_syncpt_base_r(),
					1),
			nvhost_class_host_wait_syncpt_base(p->syncpt,
					p->waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			NVHOST_OPCODE_NOOP);

	/* invalidate the FDC to prevent cache-coherency issues across GPUs
	   note that we assume FDC_CONTROL_0 is left in the reset state by
	   all contexts. the invalidate bit will clear itself, so the
	   register should be unchanged after this */
	/* bug 990395 T114 HW no longer can automatically clear the
	   invalidate bit. Luckily that the ctx switching always happens on
	   the push buffer boundary, and 3d driver inserts a FDC flush &
	   invalidate & clear the invalidate bit in the beginning of the
	   each push buffer. So we do not need to explicitly clear the
	   invalidate bit here. */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
			AR3D_FDC_CONTROL_0_RESET_VAL
				| AR3D_FDC_CONTROL_0_INVALIDATE),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));

	/* bug 972588 requires SW to clear the reg 0x403 and 0xe45 */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(0xe45, 0),
		nvhost_opcode_imm(0x403, 0));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);
	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
			nvhost_get_host(nctx->channel->dev)->memmgr,
			p->save_buf,
			0,
			nvhost_opcode_gather(p->save_size),
			p->save_phys);
}

/*
 * Emit the save-stream prologue, which writes the restore-stream
 * prologue into the context buffer via the memory-output alias.
 * NOTE(review): the trailing `ptr += RESTORE_BEGIN_SIZE` is a dead
 * store (ptr is passed by value and not used afterwards) — kept for
 * symmetry with the other emitters.
 */
static void save_begin_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	ptr[0] = nvhost_opcode_nonincr(AR3D_PIPEALIAS_DW_MEMORY_OUTPUT_DATA,
			RESTORE_BEGIN_SIZE);
	nvhost_3dctx_restore_begin(p, ptr + 1);
	ptr += RESTORE_BEGIN_SIZE;
}

/*
 * Emit a save fragment for a DIRECT register range: first the embedded
 * restore opcodes (written through pipe @pipe's memory-output alias),
 * then the indirect read-back of @count registers starting at
 * @start_reg.
 */
static void save_direct_v1(u32 *ptr, u32 start_reg, u32 count,
			u32 rst_reg, unsigned int pipe)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			(AR3D_PIPEALIAS_DW_MEMORY_OUTPUT_DATA + pipe), 1);
	nvhost_3dctx_restore_direct(ptr + 1, rst_reg, count);
	ptr += RESTORE_DIRECT_SIZE;
	ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_indoff_r(), 1);
	ptr[2] = nvhost_class_host_indoff_reg_read(
			host1x_uclass_indoff_indmodid_gr3d_v(),
			start_reg, true);
	/* TODO could do this in the setclass if count < 6 */
	ptr[3] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
}

/*
 * Emit a save fragment for an INDIRECT (offset/data register pair)
 * range: embedded restore opcodes first, then program @offset into
 * @offset_reg and read @count words back through @data_reg.
 */
static void save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count, u32 rst_reg,
			u32 rst_data_reg, unsigned int pipe)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_nonincr(
			(AR3D_PIPEALIAS_DW_MEMORY_OUTPUT_DATA + pipe),
			RESTORE_INDIRECT_SIZE);
	nvhost_3dctx_restore_indirect(ptr + 2, rst_reg, offset, rst_data_reg,
			count);
	ptr += RESTORE_INDIRECT_SIZE;
	ptr[2] = nvhost_opcode_imm(offset_reg, offset);
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_indoff_r(), 1);
	ptr[4] = nvhost_class_host_indoff_reg_read(
			host1x_uclass_indoff_indmodid_gr3d_v(),
			data_reg, false);
	ptr[5] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
}

/*
 * Emit the save-stream epilogue: terminate the restore buffer, flush
 * the FDC via an op_done syncpt increment, resynchronize the wait base,
 * stop memory-output reads and release waiters with a final syncpt
 * increment.
 */
static void save_end_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* write end of restore buffer */
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_PIPEALIAS_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_end(p, ptr + 1);
	ptr += RESTORE_END_SIZE;
	/* op_done syncpt incr to flush FDC */
	ptr[1] = nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->syncpt);
	/* host wait for that syncpt incr, and advance the wait base */
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(),
			nvhost_mask2(
				host1x_uclass_wait_syncpt_base_r(),
				host1x_uclass_incr_syncpt_base_r()));
	ptr[3] = nvhost_class_host_wait_syncpt_base(p->syncpt, p->waitbase,
			p->save_incrs - 1);
	ptr[4] = nvhost_class_host_incr_syncpt_base(p->waitbase,
			p->save_incrs);
	/* set class back to 3d */
	ptr[5] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	/* send reg reads back to host */
	ptr[6] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
	/* final syncpt increment to release waiters */
	ptr[7] = nvhost_opcode_imm(0, p->syncpt);
}

static void setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs,
			unsigned int pipe)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset =
regs->offset + pipe; u32 count = regs->count; u32 rstoff = regs->rst_off + pipe; u32 indoff = offset + 1; u32 indrstoff = rstoff + 1; switch (regs->type) { case HWCTX_REGINFO_DIRECT: if (ptr) { save_direct_v1(ptr, offset, count, rstoff, pipe); ptr += SAVE_DIRECT_V1_SIZE; } save_count += SAVE_DIRECT_V1_SIZE; restore_count += RESTORE_DIRECT_SIZE; break; case HWCTX_REGINFO_INDIRECT_4X: ++indoff; ++indrstoff; /* fall through */ case HWCTX_REGINFO_INDIRECT: if (ptr) { save_indirect_v1(ptr, offset, 0, indoff, count, rstoff, indrstoff, pipe); ptr += SAVE_INDIRECT_V1_SIZE; } save_count += SAVE_INDIRECT_V1_SIZE; restore_count += RESTORE_INDIRECT_SIZE; break; } if (ptr) { /* SAVE cases only: reserve room for incoming data */ u32 k = 0; /* * Create a signature pattern for indirect data (which * will be overwritten by true incoming data) for * better deducing where we are in a long command * sequence, when given only a FIFO snapshot for debug * purposes. */ for (k = 0; k < count; k++) *(ptr + k) = 0xd000d000 | (offset << 16) | k; ptr += count; } save_count += count; restore_count += count; } info->ptr = ptr; info->save_count = save_count; info->restore_count = restore_count; } static void incr_mem_output_pointer(struct save_info *info, unsigned int pipe, unsigned int incr) { unsigned int i; u32 *ptr = info->ptr; if (ptr) { *ptr = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0); ptr++; for (i = 0; i < incr; i++) *(ptr + i) = nvhost_opcode_imm( (AR3D_PIPEALIAS_DW_MEMORY_OUTPUT_INCR + pipe), 1); ptr += incr; } info->ptr = ptr; info->save_count += incr+1; } static void setup_save(struct host1x_hwctx_handler *p, u32 *ptr) { int pipe, i; unsigned int old_restore_count, incr_count; struct save_info info = { ptr, SAVE_BEGIN_V1_SIZE, RESTORE_BEGIN_SIZE, SAVE_INCRS, 1 }; if (info.ptr) { save_begin_v1(p, info.ptr); info.ptr += SAVE_BEGIN_V1_SIZE; } /* save regs for per pixel pipe, this has to be before the global * one. 
Advance the rest of the pipes' output pointer to match with * the pipe 0's. */ for (pipe = 1; pipe < NUM_3D_PIXEL_PIPES; pipe++) incr_mem_output_pointer(&info, pipe, RESTORE_BEGIN_SIZE); for (pipe = NUM_3D_PIXEL_PIPES - 1; pipe >= 0; pipe--) { old_restore_count = info.restore_count; setup_save_regs(&info, ctxsave_regs_3d_per_pipe, ARRAY_SIZE(ctxsave_regs_3d_per_pipe), (unsigned int) pipe); /* Advance the rest of the pipes' output pointer to match with * the current pipe's one. */ incr_count = info.restore_count - old_restore_count; for (i = 0; i < pipe; i++) incr_mem_output_pointer(&info, (unsigned int) i, incr_count); } /* save regs for global. Use pipe 0 to do the save */ setup_save_regs(&info, ctxsave_regs_3d_global, ARRAY_SIZE(ctxsave_regs_3d_global), 0); if (info.ptr) { save_end_v1(p, info.ptr); info.ptr += SAVE_END_V1_SIZE; } wmb(); p->save_size = info.save_count + SAVE_END_V1_SIZE; p->restore_size = info.restore_count + RESTORE_END_SIZE; p->save_incrs = info.save_incrs; p->save_thresh = p->save_incrs; p->restore_incrs = info.restore_incrs; } /*** ctx3d ***/ static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_hwctx_handler *h, struct nvhost_channel *ch) { struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); struct host1x_hwctx *ctx = nvhost_3dctx_alloc_common(p, ch, false); if (ctx) return &ctx->hwctx; else return NULL; } struct nvhost_hwctx_handler *nvhost_gr3d_t114_ctxhandler_init( u32 syncpt, u32 waitbase, struct nvhost_channel *ch) { struct mem_mgr *memmgr; u32 *save_ptr; struct host1x_hwctx_handler *p; p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) return NULL; memmgr = nvhost_get_host(ch->dev)->memmgr; p->syncpt = syncpt; p->waitbase = waitbase; setup_save(p, NULL); p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32, mem_mgr_flag_write_combine); if (IS_ERR_OR_NULL(p->save_buf)) goto fail_alloc; save_ptr = mem_op().mmap(p->save_buf); if (IS_ERR_OR_NULL(save_ptr)) goto fail_mmap; p->save_sgt = mem_op().pin(memmgr, p->save_buf); if 
(IS_ERR_OR_NULL(p->save_sgt)) goto fail_pin; p->save_phys = sg_dma_address(p->save_sgt->sgl); setup_save(p, save_ptr); mem_op().munmap(p->save_buf, save_ptr); p->save_slots = 5; p->h.alloc = ctx3d_alloc_v1; p->h.save_push = save_push_v1; p->h.save_service = NULL; p->h.get = nvhost_3dctx_get; p->h.put = nvhost_3dctx_put; return &p->h; fail_pin: mem_op().munmap(p->save_buf, save_ptr); fail_mmap: mem_op().put(memmgr, p->save_buf); fail_alloc: kfree(p); return NULL; } void nvhost_gr3d_t114_init(struct platform_device *dev) { if (actmon_op().init) actmon_op().init(nvhost_get_host(dev)); } void nvhost_gr3d_t114_deinit(struct platform_device *dev) { if (actmon_op().deinit) actmon_op().deinit(nvhost_get_host(dev)); } int nvhost_gr3d_t114_prepare_power_off(struct platform_device *dev) { if (actmon_op().deinit) actmon_op().deinit(nvhost_get_host(dev)); return nvhost_gr3d_prepare_power_off(dev); } void nvhost_gr3d_t114_finalize_power_on(struct platform_device *dev) { /* actmon needs to be reinitialized when we come back from * power gated state */ if (actmon_op().init) actmon_op().init(nvhost_get_host(dev)); }
gpl-2.0
wenboqiu/TestOpenGL
TestOpenGL/cocos2d/cocos/2d/CCActionCamera.cpp
50
6503
/**************************************************************************** Copyright (c) 2008-2010 Ricardo Quesada Copyright (c) 2010-2012 cocos2d-x.org Copyright (c) 2011 Zynga Inc. Copyright (c) 2013-2014 Chukong Technologies Inc. http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ****************************************************************************/ #include "2d/CCActionCamera.h" #include "2d/CCNode.h" #include "platform/CCStdC.h" NS_CC_BEGIN // // CameraAction // ActionCamera::ActionCamera() : _center(0, 0, 0) , _eye(0, 0, FLT_EPSILON) , _up(0, 1, 0) { } void ActionCamera::startWithTarget(Node *target) { ActionInterval::startWithTarget(target); } ActionCamera* ActionCamera::clone() const { // no copy constructor auto a = new (std::nothrow) ActionCamera(); a->autorelease(); return a; } ActionCamera * ActionCamera::reverse() const { // FIXME: This conversion isn't safe. 
return (ActionCamera*)ReverseTime::create(const_cast<ActionCamera*>(this)); } void ActionCamera::restore() { _center.setZero(); _eye.set(0.0f, 0.0f, FLT_EPSILON); _up.set(0.0f, 1.0f, 0.0f); } void ActionCamera::setEye(const Vec3& eye) { _eye = eye; updateTransform(); } void ActionCamera::setEye(float x, float y, float z) { _eye.set(x, y, z); updateTransform(); } void ActionCamera::setCenter(const Vec3& center) { _center = center; updateTransform(); } void ActionCamera::setUp(const Vec3& up) { _up = up; updateTransform(); } void ActionCamera::updateTransform() { Mat4 lookupMatrix; Mat4::createLookAt(_eye.x, _eye.y, _eye.z, _center.x, _center.y, _center.z, _up.x, _up.y, _up.z, &lookupMatrix); Vec2 anchorPoint = _target->getAnchorPointInPoints(); bool needsTranslation = !anchorPoint.isZero(); Mat4 mv = Mat4::IDENTITY; if(needsTranslation) { Mat4 t; Mat4::createTranslation(anchorPoint.x, anchorPoint.y, 0, &t); mv = mv * t; } mv = mv * lookupMatrix; if(needsTranslation) { Mat4 t; Mat4::createTranslation(-anchorPoint.x, -anchorPoint.y, 0, &t); mv = mv * t; } // FIXME: Using the AdditionalTransform is a complete hack. // This should be done by multipliying the lookup-Matrix with the Node's MV matrix // And then setting the result as the new MV matrix // But that operation needs to be done after all the 'updates'. // So the Director should emit an 'director_after_update' event. 
// And this object should listen to it _target->setAdditionalTransform(&mv); } // // OrbitCamera // OrbitCamera::OrbitCamera() : _radius(0.0) , _deltaRadius(0.0) , _angleZ(0.0) , _deltaAngleZ(0.0) , _angleX(0.0) , _deltaAngleX(0.0) , _radZ(0.0) , _radDeltaZ(0.0) , _radX(0.0) , _radDeltaX(0.0) { } OrbitCamera::~OrbitCamera() { } OrbitCamera * OrbitCamera::create(float t, float radius, float deltaRadius, float angleZ, float deltaAngleZ, float angleX, float deltaAngleX) { OrbitCamera * obitCamera = new (std::nothrow) OrbitCamera(); if(obitCamera->initWithDuration(t, radius, deltaRadius, angleZ, deltaAngleZ, angleX, deltaAngleX)) { obitCamera->autorelease(); return obitCamera; } CC_SAFE_DELETE(obitCamera); return nullptr; } OrbitCamera* OrbitCamera::clone() const { // no copy constructor auto a = new (std::nothrow) OrbitCamera(); a->initWithDuration(_duration, _radius, _deltaRadius, _angleZ, _deltaAngleZ, _angleX, _deltaAngleX); a->autorelease(); return a; } bool OrbitCamera::initWithDuration(float t, float radius, float deltaRadius, float angleZ, float deltaAngleZ, float angleX, float deltaAngleX) { if ( ActionInterval::initWithDuration(t) ) { _radius = radius; _deltaRadius = deltaRadius; _angleZ = angleZ; _deltaAngleZ = deltaAngleZ; _angleX = angleX; _deltaAngleX = deltaAngleX; _radDeltaZ = (float)CC_DEGREES_TO_RADIANS(deltaAngleZ); _radDeltaX = (float)CC_DEGREES_TO_RADIANS(deltaAngleX); return true; } return false; } void OrbitCamera::startWithTarget(Node *target) { ActionCamera::startWithTarget(target); float r, zenith, azimuth; this->sphericalRadius(&r, &zenith, &azimuth); if( isnan(_radius) ) _radius = r; if( isnan(_angleZ) ) _angleZ = (float)CC_RADIANS_TO_DEGREES(zenith); if( isnan(_angleX) ) _angleX = (float)CC_RADIANS_TO_DEGREES(azimuth); _radZ = (float)CC_DEGREES_TO_RADIANS(_angleZ); _radX = (float)CC_DEGREES_TO_RADIANS(_angleX); } void OrbitCamera::update(float dt) { float r = (_radius + _deltaRadius * dt) * FLT_EPSILON; float za = _radZ + _radDeltaZ * dt; 
float xa = _radX + _radDeltaX * dt; float i = sinf(za) * cosf(xa) * r + _center.x; float j = sinf(za) * sinf(xa) * r + _center.y; float k = cosf(za) * r + _center.z; setEye(i,j,k); } void OrbitCamera::sphericalRadius(float *newRadius, float *zenith, float *azimuth) { float r; // radius float s; float x = _eye.x - _center.x; float y = _eye.y - _center.y; float z = _eye.z - _center.z; r = sqrtf( powf(x,2) + powf(y,2) + powf(z,2)); s = sqrtf( powf(x,2) + powf(y,2)); if( s == 0.0f ) s = FLT_EPSILON; if(r==0.0f) r = FLT_EPSILON; *zenith = acosf( z/r); if( x < 0 ) *azimuth= (float)M_PI - asinf(y/s); else *azimuth = asinf(y/s); *newRadius = r / FLT_EPSILON; } NS_CC_END
gpl-2.0
shengzhou/linux
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
306
62466
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include <linux/module.h> #include <linux/if_ether.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/err.h> #include <linux/jiffies.h> #include <net/cfg80211.h> #include <brcmu_utils.h> #include <brcmu_wifi.h> #include "dhd.h" #include "dhd_dbg.h" #include "dhd_bus.h" #include "fwil.h" #include "fwil_types.h" #include "fweh.h" #include "fwsignal.h" #include "p2p.h" #include "wl_cfg80211.h" #include "proto.h" /** * DOC: Firmware Signalling * * Firmware can send signals to host and vice versa, which are passed in the * data packets using TLV based header. This signalling layer is on top of the * BDC bus protocol layer. */ /* * single definition for firmware-driver flow control tlv's. * * each tlv is specified by BRCMF_FWS_TLV_DEF(name, ID, length). * A length value 0 indicates variable length tlv. 
*/ #define BRCMF_FWS_TLV_DEFLIST \ BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \ BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \ BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \ BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \ BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \ BRCMF_FWS_TLV_DEF(MACDESC_ADD, 6, 8) \ BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \ BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \ BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \ BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \ BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 6) \ BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \ BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \ BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \ BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \ BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \ BRCMF_FWS_TLV_DEF(FILLER, 255, 0) /* * enum brcmf_fws_tlv_type - definition of tlv identifiers. */ #define BRCMF_FWS_TLV_DEF(name, id, len) \ BRCMF_FWS_TYPE_ ## name = id, enum brcmf_fws_tlv_type { BRCMF_FWS_TLV_DEFLIST BRCMF_FWS_TYPE_INVALID }; #undef BRCMF_FWS_TLV_DEF /* * enum brcmf_fws_tlv_len - definition of tlv lengths. */ #define BRCMF_FWS_TLV_DEF(name, id, len) \ BRCMF_FWS_TYPE_ ## name ## _LEN = (len), enum brcmf_fws_tlv_len { BRCMF_FWS_TLV_DEFLIST }; #undef BRCMF_FWS_TLV_DEF #ifdef DEBUG /* * brcmf_fws_tlv_names - array of tlv names. */ #define BRCMF_FWS_TLV_DEF(name, id, len) \ { id, #name }, static struct { enum brcmf_fws_tlv_type id; const char *name; } brcmf_fws_tlv_names[] = { BRCMF_FWS_TLV_DEFLIST }; #undef BRCMF_FWS_TLV_DEF static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id) { int i; for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++) if (brcmf_fws_tlv_names[i].id == id) return brcmf_fws_tlv_names[i].name; return "INVALID"; } #else static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id) { return "NODEBUG"; } #endif /* DEBUG */ /* * The PKTTAG tlv has additional bytes when firmware-signalling * mode has REUSESEQ flag set. */ #define BRCMF_FWS_TYPE_SEQ_LEN 2 /* * flags used to enable tlv signalling from firmware. 
*/ #define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001 #define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002 #define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004 #define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008 #define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010 #define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020 #define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040 #define BRCMF_FWS_MAC_DESC_TABLE_SIZE 32 #define BRCMF_FWS_MAC_DESC_ID_INVALID 0xff #define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF 0 #define BRCMF_FWS_HOSTIF_FLOWSTATE_ON 1 #define BRCMF_FWS_FLOWCONTROL_HIWATER 128 #define BRCMF_FWS_FLOWCONTROL_LOWATER 64 #define BRCMF_FWS_PSQ_PREC_COUNT ((BRCMF_FWS_FIFO_COUNT + 1) * 2) #define BRCMF_FWS_PSQ_LEN 256 #define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01 #define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02 #define BRCMF_FWS_RET_OK_NOSCHEDULE 0 #define BRCMF_FWS_RET_OK_SCHEDULE 1 #define BRCMF_FWS_MODE_REUSESEQ_SHIFT 3 /* seq reuse */ #define BRCMF_FWS_MODE_SET_REUSESEQ(x, val) ((x) = \ ((x) & ~(1 << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) | \ (((val) & 1) << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) #define BRCMF_FWS_MODE_GET_REUSESEQ(x) \ (((x) >> BRCMF_FWS_MODE_REUSESEQ_SHIFT) & 1) /** * enum brcmf_fws_skb_state - indicates processing state of skb. * * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info. */ enum brcmf_fws_skb_state { BRCMF_FWS_SKBSTATE_NEW, BRCMF_FWS_SKBSTATE_DELAYED, BRCMF_FWS_SKBSTATE_SUPPRESSED, BRCMF_FWS_SKBSTATE_TIM }; /** * struct brcmf_skbuff_cb - control buffer associated with skbuff. * * @bus_flags: 2 bytes reserved for bus specific parameters * @if_flags: holds interface index and packet related flags. * @htod: host to device packet identifier (used in PKTTAG tlv). * @htod_seq: this 16-bit is original seq number for every suppress packet. 
* @state: transmit state of the packet. * @mac: descriptor related to destination for this packet. * * This information is stored in control buffer struct sk_buff::cb, which * provides 48 bytes of storage so this structure should not exceed that. */ struct brcmf_skbuff_cb { u16 bus_flags; u16 if_flags; u32 htod; u16 htod_seq; enum brcmf_fws_skb_state state; struct brcmf_fws_mac_descriptor *mac; }; /* * macro casting skbuff control buffer to struct brcmf_skbuff_cb. */ #define brcmf_skbcb(skb) ((struct brcmf_skbuff_cb *)((skb)->cb)) /* * sk_buff control if flags * * b[11] - packet sent upon firmware request. * b[10] - packet only contains signalling data. * b[9] - packet is a tx packet. * b[8] - packet used requested credit * b[7] - interface in AP mode. * b[3:0] - interface index. */ #define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800 #define BRCMF_SKB_IF_FLAGS_REQUESTED_SHIFT 11 #define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_MASK 0x0400 #define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10 #define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200 #define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9 #define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK 0x0100 #define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT 8 #define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080 #define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7 #define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f #define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0 #define brcmf_skb_if_flags_set_field(skb, field, value) \ brcmu_maskset16(&(brcmf_skbcb(skb)->if_flags), \ BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \ BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT, (value)) #define brcmf_skb_if_flags_get_field(skb, field) \ brcmu_maskget16(brcmf_skbcb(skb)->if_flags, \ BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \ BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT) /* * sk_buff control packet identifier * * 32-bit packet identifier used in PKTTAG tlv from host to dongle. * * - Generated at the host (e.g. dhd) * - Seen as a generic sequence number by firmware except for the flags field. 
* * Generation : b[31] => generation number for this packet [host->fw] * OR, current generation number [fw->host] * Flags : b[30:27] => command, status flags * FIFO-AC : b[26:24] => AC-FIFO id * h-slot : b[23:8] => hanger-slot * freerun : b[7:0] => A free running counter */ #define BRCMF_SKB_HTOD_TAG_GENERATION_MASK 0x80000000 #define BRCMF_SKB_HTOD_TAG_GENERATION_SHIFT 31 #define BRCMF_SKB_HTOD_TAG_FLAGS_MASK 0x78000000 #define BRCMF_SKB_HTOD_TAG_FLAGS_SHIFT 27 #define BRCMF_SKB_HTOD_TAG_FIFO_MASK 0x07000000 #define BRCMF_SKB_HTOD_TAG_FIFO_SHIFT 24 #define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00 #define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8 #define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff #define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0 #define brcmf_skb_htod_tag_set_field(skb, field, value) \ brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \ BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \ BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT, (value)) #define brcmf_skb_htod_tag_get_field(skb, field) \ brcmu_maskget32(brcmf_skbcb(skb)->htod, \ BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \ BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT) #define BRCMF_SKB_HTOD_SEQ_FROMFW_MASK 0x2000 #define BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT 13 #define BRCMF_SKB_HTOD_SEQ_FROMDRV_MASK 0x1000 #define BRCMF_SKB_HTOD_SEQ_FROMDRV_SHIFT 12 #define BRCMF_SKB_HTOD_SEQ_NR_MASK 0x0fff #define BRCMF_SKB_HTOD_SEQ_NR_SHIFT 0 #define brcmf_skb_htod_seq_set_field(skb, field, value) \ brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \ BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \ BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value)) #define brcmf_skb_htod_seq_get_field(skb, field) \ brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \ BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \ BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT) #define BRCMF_FWS_TXSTAT_GENERATION_MASK 0x80000000 #define BRCMF_FWS_TXSTAT_GENERATION_SHIFT 31 #define BRCMF_FWS_TXSTAT_FLAGS_MASK 0x78000000 #define BRCMF_FWS_TXSTAT_FLAGS_SHIFT 27 #define BRCMF_FWS_TXSTAT_FIFO_MASK 0x07000000 #define 
BRCMF_FWS_TXSTAT_FIFO_SHIFT 24 #define BRCMF_FWS_TXSTAT_HSLOT_MASK 0x00FFFF00 #define BRCMF_FWS_TXSTAT_HSLOT_SHIFT 8 #define BRCMF_FWS_TXSTAT_FREERUN_MASK 0x000000FF #define BRCMF_FWS_TXSTAT_FREERUN_SHIFT 0 #define brcmf_txstatus_get_field(txs, field) \ brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \ BRCMF_FWS_TXSTAT_ ## field ## _SHIFT) /* How long to defer borrowing in jiffies */ #define BRCMF_FWS_BORROW_DEFER_PERIOD (HZ / 10) /** * enum brcmf_fws_fifo - fifo indices used by dongle firmware. * * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background. * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic. * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only). * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only). * @BRCMF_FWS_FIFO_COUNT: number of fifos. */ enum brcmf_fws_fifo { BRCMF_FWS_FIFO_FIRST, BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST, BRCMF_FWS_FIFO_AC_BE, BRCMF_FWS_FIFO_AC_VI, BRCMF_FWS_FIFO_AC_VO, BRCMF_FWS_FIFO_BCMC, BRCMF_FWS_FIFO_ATIM, BRCMF_FWS_FIFO_COUNT }; /** * enum brcmf_fws_txstatus - txstatus flag values. * * @BRCMF_FWS_TXSTATUS_DISCARD: * host is free to discard the packet. * @BRCMF_FWS_TXSTATUS_CORE_SUPPRESS: * 802.11 core suppressed the packet. * @BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS: * firmware suppress the packet as device is already in PS mode. * @BRCMF_FWS_TXSTATUS_FW_TOSSED: * firmware tossed the packet. * @BRCMF_FWS_TXSTATUS_HOST_TOSSED: * host tossed the packet. 
 */

/* Per-packet txstatus codes reported by firmware. */
enum brcmf_fws_txstatus {
	BRCMF_FWS_TXSTATUS_DISCARD,
	BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
	BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
	BRCMF_FWS_TXSTATUS_FW_TOSSED,
	BRCMF_FWS_TXSTATUS_HOST_TOSSED
};

/* Flow-control mode: disabled, or credit-based (implied/explicit). */
enum brcmf_fws_fcmode {
	BRCMF_FWS_FCMODE_NONE,
	BRCMF_FWS_FCMODE_IMPLIED_CREDIT,
	BRCMF_FWS_FCMODE_EXPLICIT_CREDIT
};

/* Open/close state of a mac descriptor. */
enum brcmf_fws_mac_desc_state {
	BRCMF_FWS_STATE_OPEN = 1,
	BRCMF_FWS_STATE_CLOSE
};

/**
 * struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
 *
 * @name: printable descriptor name (for debug logging).
 * @occupied: slot is in use.
 * @mac_handle: handle for mac entry determined by firmware.
 * @interface_id: interface index.
 * @state: current state.
 * @suppressed: mac entry is suppressed.
 * @generation: generation bit.
 * @ac_bitmap: ac queue bitmap.
 * @requested_credit: credits requested by firmware.
 * @requested_packet: packets requested by firmware.
 * @ea: ethernet address.
 * @seq: per-node free-running sequence.
 * @psq: power-save queue.
 * @transit_count: packet in transit to firmware.
 * @suppr_transit_count: suppressed packets still in transit.
 * @send_tim_signal: pending traffic bitmap must be signalled to firmware.
 * @traffic_pending_bmp: per-fifo pending-traffic bitmap.
 * @traffic_lastreported_bmp: last traffic bitmap reported to firmware.
 */
struct brcmf_fws_mac_descriptor {
	char name[16];
	u8 occupied;
	u8 mac_handle;
	u8 interface_id;
	u8 state;
	bool suppressed;
	u8 generation;
	u8 ac_bitmap;
	u8 requested_credit;
	u8 requested_packet;
	u8 ea[ETH_ALEN];
	u8 seq[BRCMF_FWS_FIFO_COUNT];
	struct pktq psq;
	int transit_count;
	int suppr_transit_count;
	bool send_tim_signal;
	u8 traffic_pending_bmp;
	u8 traffic_lastreported_bmp;
};

#define BRCMF_FWS_HANGER_MAXITEMS	1024

/**
 * enum brcmf_fws_hanger_item_state - state of hanger item.
 *
 * @BRCMF_FWS_HANGER_ITEM_STATE_FREE: item is free for use.
 * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE: item is in use.
 * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED: item was suppressed.
 */
enum brcmf_fws_hanger_item_state {
	BRCMF_FWS_HANGER_ITEM_STATE_FREE = 1,
	BRCMF_FWS_HANGER_ITEM_STATE_INUSE,
	BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED
};

/**
 * struct brcmf_fws_hanger_item - single entry for tx pending packet.
 *
 * @state: entry is either free or occupied.
 * @pkt: packet itself.
 */
struct brcmf_fws_hanger_item {
	enum brcmf_fws_hanger_item_state state;
	struct sk_buff *pkt;
};

/**
 * struct brcmf_fws_hanger - holds packets awaiting firmware txstatus.
 *
 * @pushed: packets pushed to await txstatus.
 * @popped: packets popped upon handling txstatus.
 * @failed_to_push: packets that could not be pushed.
 * @failed_to_pop: packets that could not be popped.
 * @failed_slotfind: packets for which failed to find an entry.
 * @slot_pos: last returned item index for a free entry.
 * @items: array of hanger items.
 */
struct brcmf_fws_hanger {
	u32 pushed;
	u32 popped;
	u32 failed_to_push;
	u32 failed_to_pop;
	u32 failed_slotfind;
	u32 slot_pos;
	struct brcmf_fws_hanger_item items[BRCMF_FWS_HANGER_MAXITEMS];
};

/* All mac descriptors: per-station nodes, per-interface entries, and a
 * catch-all "other" entry for remaining destinations.
 */
struct brcmf_fws_macdesc_table {
	struct brcmf_fws_mac_descriptor nodes[BRCMF_FWS_MAC_DESC_TABLE_SIZE];
	struct brcmf_fws_mac_descriptor iface[BRCMF_MAX_IFS];
	struct brcmf_fws_mac_descriptor other;
};

/* Debug/statistics counters maintained throughout this file. */
struct brcmf_fws_stats {
	u32 tlv_parse_failed;
	u32 tlv_invalid_type;
	u32 header_only_pkt;
	u32 header_pulls;
	u32 pkt2bus;
	u32 send_pkts[5];
	u32 requested_sent[5];
	u32 generic_error;
	u32 mac_update_failed;
	u32 mac_ps_update_failed;
	u32 if_update_failed;
	u32 packet_request_failed;
	u32 credit_request_failed;
	u32 rollback_success;
	u32 rollback_failed;
	u32 delayq_full_error;
	u32 supprq_full_error;
	u32 txs_indicate;
	u32 txs_discard;
	u32 txs_supp_core;
	u32 txs_supp_ps;
	u32 txs_tossed;
	u32 txs_host_tossed;
	u32 bus_flow_block;
	u32 fws_flow_block;
};

/* Main firmware-signalling state, one instance per driver. */
struct brcmf_fws_info {
	struct brcmf_pub *drvr;
	spinlock_t spinlock;
	/* NOTE(review): irqsave flags are kept in this shared struct rather
	 * than on the locker's stack; this is only safe as long as
	 * brcmf_fws_lock() is never nested -- confirm before adding any
	 * nested or concurrent locking paths.
	 */
	ulong flags;
	struct brcmf_fws_stats stats;
	struct brcmf_fws_hanger hanger;
	enum brcmf_fws_fcmode fcmode;
	bool fw_signals;
	bool bcmc_credit_check;
	struct brcmf_fws_macdesc_table desc;
	struct workqueue_struct *fws_wq;
	struct work_struct fws_dequeue_work;
	u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
	int fifo_credit[BRCMF_FWS_FIFO_COUNT];
	int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
	int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
	u32 fifo_credit_map;
	u32 fifo_delay_map;
	unsigned long borrow_defer_timestamp;
	bool bus_flow_blocked;
	bool creditmap_received;
	u8 mode;
	bool avoid_queueing;
};

/*
 * brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
 */
static const int brcmf_fws_prio2fifo[] = {
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_BK,
	BRCMF_FWS_FIFO_AC_BK,
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VO,
	BRCMF_FWS_FIFO_AC_VO
};

static int fcmode;
module_param(fcmode, int, S_IRUSR);
MODULE_PARM_DESC(fcmode, "mode of firmware signalled flow control");

#define BRCMF_FWS_TLV_DEF(name, id, len) \
	case BRCMF_FWS_TYPE_ ## name: \
		return len;

/**
 * brcmf_fws_get_tlv_len() - returns defined length for given tlv id.
 *
 * @fws: firmware-signalling information.
 * @id: identifier of the TLV.
 *
 * Return: the specified length for the given TLV; Otherwise -EINVAL.
 */
static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
				 enum brcmf_fws_tlv_type id)
{
	switch (id) {
	BRCMF_FWS_TLV_DEFLIST
	default:
		fws->stats.tlv_invalid_type++;
		break;
	}
	return -EINVAL;
}
#undef BRCMF_FWS_TLV_DEF

/* Take the fws spinlock; irq flags are saved in fws->flags (see note on
 * struct brcmf_fws_info).
 */
static void brcmf_fws_lock(struct brcmf_fws_info *fws)
		__acquires(&fws->spinlock)
{
	spin_lock_irqsave(&fws->spinlock, fws->flags);
}

/* Release the fws spinlock, restoring irq flags from fws->flags. */
static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
		__releases(&fws->spinlock)
{
	spin_unlock_irqrestore(&fws->spinlock, fws->flags);
}

/* Packet-queue match callback: true when skb belongs to interface *arg. */
static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
	u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
	return ifidx == *(int *)arg;
}

/* Free all packets in @q; when @ifidx != -1 only those of that interface. */
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
				int ifidx)
{
	bool (*matchfn)(struct sk_buff *, void *) = NULL;
	struct sk_buff *skb;
	int prec;

	if (ifidx != -1)
		matchfn = brcmf_fws_ifidx_match;
	for (prec = 0; prec < q->num_prec; prec++) {
		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
		while (skb) {
			brcmu_pkt_buf_free_skb(skb);
			skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
		}
	}
}

/* Reset the hanger: mark every slot free. */
static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
{
	int i;
	memset(hanger, 0, sizeof(*hanger));
	for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
		hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
}

/* Find a free hanger slot, scanning round-robin from the last returned
 * position. Returns BRCMF_FWS_HANGER_MAXITEMS when all slots are occupied.
 */
static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
{
	u32 i;

	i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;

	while (i != h->slot_pos) {
		if (h->items[i].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
			h->slot_pos = i;
			goto done;
		}
		i++;
		if (i == BRCMF_FWS_HANGER_MAXITEMS)
			i = 0;
	}
	brcmf_err("all slots occupied\n");
	h->failed_slotfind++;
	i = BRCMF_FWS_HANGER_MAXITEMS;

done:
	return i;
}

/* Park @pkt in hanger slot @slot_id until its txstatus arrives. */
static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
				    struct sk_buff *pkt, u32 slot_id)
{
	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
		brcmf_err("slot is not free\n");
		h->failed_to_push++;
		return -EINVAL;
	}

	h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE;
	h->items[slot_id].pkt = pkt;
	h->pushed++;
	return 0;
}

/* Retrieve the packet in slot @slot_id; free the slot when @remove_item. */
static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
				   u32 slot_id, struct sk_buff **pktout,
				   bool remove_item)
{
	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
		brcmf_err("entry not in use\n");
		h->failed_to_pop++;
		return -EINVAL;
	}

	*pktout = h->items[slot_id].pkt;
	if (remove_item) {
		h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
		h->items[slot_id].pkt = NULL;
		h->popped++;
	}
	return 0;
}

/* Flag an occupied slot as holding a firmware-suppressed packet. */
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
					    u32 slot_id)
{
	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
		brcmf_err("entry not in use\n");
		return -EINVAL;
	}

	h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
	return 0;
}

/* Free all hanger packets matching @fn (or all when @fn is NULL) and
 * release their slots. Packets in the SUPPRESSED state are not freed
 * here: those skbs are owned by (and freed from) the psq.
 */
static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
				     bool (*fn)(struct sk_buff *, void *),
				     int ifidx)
{
	struct brcmf_fws_hanger *h = &fws->hanger;
	struct sk_buff *skb;
	int i;
	enum brcmf_fws_hanger_item_state s;

	for (i = 0; i < ARRAY_SIZE(h->items); i++) {
		s = h->items[i].state;
		if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
		    s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
			skb = h->items[i].pkt;
			if (fn == NULL || fn(skb, &ifidx)) {
				/* suppress packets freed from psq */
				if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE)
					brcmu_pkt_buf_free_skb(skb);
				h->items[i].state =
					BRCMF_FWS_HANGER_ITEM_STATE_FREE;
			}
		}
	}
}

/* Build a printable name for @desc reflecting its role (other/node/iface). */
static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
				       struct brcmf_fws_mac_descriptor *desc)
{
	if (desc == &fws->desc.other)
		strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
	else if (desc->mac_handle)
		scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
			  desc->mac_handle, desc->interface_id);
	else
		scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
			  desc->interface_id);
}

/* (Re)initialize a mac descriptor to the open state for @addr/@ifidx. */
static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc,
				   u8 *addr, u8 ifidx)
{
	brcmf_dbg(TRACE,
		  "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
	desc->occupied = 1;
	desc->state = BRCMF_FWS_STATE_OPEN;
	desc->requested_credit = 0;
	desc->requested_packet = 0;
	/* depending on use may need ifp->bssidx instead */
	desc->interface_id = ifidx;
	desc->ac_bitmap = 0xff; /* update this when handling APSD */
	if (addr)
		memcpy(&desc->ea[0], addr, ETH_ALEN);
}

/* Mark a mac descriptor unused and closed, wiping firmware requests. */
static void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc)
{
	brcmf_dbg(TRACE,
		  "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
	desc->occupied = 0;
	desc->state = BRCMF_FWS_STATE_CLOSE;
	desc->requested_credit = 0;
	desc->requested_packet = 0;
}

/* Find the occupied node descriptor matching ethernet address @ea. */
static struct brcmf_fws_mac_descriptor *
brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea)
{
	struct brcmf_fws_mac_descriptor *entry;
	int i;

	if (ea == NULL)
		return ERR_PTR(-EINVAL);

	entry = &fws->desc.nodes[0];
	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) {
		if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))
			return entry;
		entry++;
	}

	return ERR_PTR(-ENOENT);
}

/* Select the descriptor for destination @da: the unique node entry when
 * one exists, otherwise the interface entry (or the "other" fallback).
 */
static struct brcmf_fws_mac_descriptor*
brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da)
{
	struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
	bool multicast;

	multicast = is_multicast_ether_addr(da);

	/* Multicast destination, STA and P2P clients get the interface entry.
	 * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
	 * have their own entry.
	 */
	if (multicast && ifp->fws_desc) {
		entry = ifp->fws_desc;
		goto done;
	}

	entry = brcmf_fws_macdesc_lookup(fws, da);
	if (IS_ERR(entry))
		entry = ifp->fws_desc;

done:
	return entry;
}

/* Return true when no traffic may currently be sent to @entry on @fifo. */
static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws,
				     struct brcmf_fws_mac_descriptor *entry,
				     int fifo)
{
	struct brcmf_fws_mac_descriptor *if_entry;
	bool closed;

	/* for unique destination entries the related interface
	 * may be closed.
	 */
	if (entry->mac_handle) {
		if_entry = &fws->desc.iface[entry->interface_id];
		if (if_entry->state == BRCMF_FWS_STATE_CLOSE)
			return true;
	}
	/* an entry is closed when the state is closed and
	 * the firmware did not request anything.
	 */
	closed = entry->state == BRCMF_FWS_STATE_CLOSE &&
		 !entry->requested_credit && !entry->requested_packet;

	/* Or firmware does not allow traffic for given fifo */
	return closed || !(entry->ac_bitmap & BIT(fifo));
}

/* Flush an entry's psq (optionally per-interface) and mark the entry
 * free when its queue becomes empty.
 */
static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws,
				      struct brcmf_fws_mac_descriptor *entry,
				      int ifidx)
{
	if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
		brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
		entry->occupied = !!(entry->psq.len);
	}
}

/* Drop (optionally per-interface) packets still sitting in the bus txq,
 * releasing their hanger slots.
 */
static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
				      bool (*fn)(struct sk_buff *, void *),
				      int ifidx)
{
	struct brcmf_fws_hanger_item *hi;
	struct pktq *txq;
	struct sk_buff *skb;
	int prec;
	u32 hslot;

	txq = brcmf_bus_gettxq(fws->drvr->bus_if);
	if (IS_ERR(txq)) {
		brcmf_dbg(TRACE, "no txq to clean up\n");
		return;
	}

	for (prec = 0; prec < txq->num_prec; prec++) {
		skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
		while (skb) {
			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
			hi = &fws->hanger.items[hslot];
			WARN_ON(skb != hi->pkt);
			hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
			brcmu_pkt_buf_free_skb(skb);
			skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
		}
	}
}

/* Full cleanup for interface @ifidx (or all when -1): node entries, the
 * "other" entry, the bus txq and the hanger.
 */
static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
{
	int i;
	struct brcmf_fws_mac_descriptor *table;
	bool (*matchfn)(struct sk_buff *, void *) = NULL;

	if (fws == NULL)
		return;

	if (ifidx != -1)
		matchfn = brcmf_fws_ifidx_match;

	/* cleanup individual nodes */
	table = &fws->desc.nodes[0];
	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
		brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);

	brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
	brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
	brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
}

/* Prepend the firmware-signalling TLVs (pkttag, optional sequence and
 * TIM bitmap, padded with fillers to a 4-byte multiple) to @skb.
 * Returns the header size in 4-byte words.
 */
static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	u8 *wlh;
	u16 data_offset = 0;
	u8 fillers;
	__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
	__le16 pktseq =
	    cpu_to_le16(brcmf_skbcb(skb)->htod_seq);

	brcmf_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
		  entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
		  (le32_to_cpu(pkttag) >> 8) & 0xffff,
		  brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
	if (entry->send_tim_signal)
		data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
		data_offset += BRCMF_FWS_TYPE_SEQ_LEN;
	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
	data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
	fillers = round_up(data_offset, 4) - data_offset;
	data_offset += fillers;

	skb_push(skb, data_offset);
	wlh = skb->data;

	wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
	wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
	memcpy(&wlh[2], &pkttag, sizeof(pkttag));
	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
		wlh[1] += BRCMF_FWS_TYPE_SEQ_LEN;
		memcpy(&wlh[2 + BRCMF_FWS_TYPE_PKTTAG_LEN], &pktseq,
		       sizeof(pktseq));
	}
	wlh += wlh[1] + 2;

	if (entry->send_tim_signal) {
		entry->send_tim_signal = 0;
		wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
		wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
		wlh[2] = entry->mac_handle;
		wlh[3] = entry->traffic_pending_bmp;
		brcmf_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
			  entry->mac_handle, entry->traffic_pending_bmp);
		wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
	}
	if (fillers)
		memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);

	return (u8)(data_offset >> 2);
}

/* Recompute the per-fifo pending-traffic bitmap for @entry. When the
 * bitmap changed, @send_immediately is set and the entry is closed,
 * push a dummy "TIM" packet carrying the new bitmap to firmware.
 * Returns true when such a signal packet was sent.
 * Called with the fws lock held; the lock is dropped around transmit.
 */
static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
				 struct brcmf_fws_mac_descriptor *entry,
				 int fifo, bool send_immediately)
{
	struct sk_buff *skb;
	struct brcmf_skbuff_cb *skcb;
	s32 err;
	u32 len;
	u8 data_offset;
	int ifidx;

	/* check delayedQ and suppressQ in one call using bitmap */
	if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
		entry->traffic_pending_bmp &= ~NBITVAL(fifo);
	else
		entry->traffic_pending_bmp |= NBITVAL(fifo);

	entry->send_tim_signal = false;
	if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
		entry->send_tim_signal = true;
	if (send_immediately && entry->send_tim_signal &&
	    entry->state == BRCMF_FWS_STATE_CLOSE) {
		/* create a dummy packet and sent that. The traffic          */
		/* bitmap info will automatically be attached to that packet */
		len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
		      BRCMF_FWS_TYPE_SEQ_LEN +
		      BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
		      4 + fws->drvr->hdrlen;
		skb = brcmu_pkt_buf_get_skb(len);
		if (skb == NULL)
			return false;
		skb_pull(skb, len);
		skcb = brcmf_skbcb(skb);
		skcb->mac = entry;
		skcb->state = BRCMF_FWS_SKBSTATE_TIM;
		skcb->htod = 0;
		skcb->htod_seq = 0;
		data_offset = brcmf_fws_hdrpush(fws, skb);
		ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
		brcmf_fws_unlock(fws);
		err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
		brcmf_fws_lock(fws);
		if (err)
			brcmu_pkt_buf_free_skb(skb);
		return true;
	}
	return false;
}

/* Toggle netif flow control for the interface owning @pq based on the
 * low/high queue-length watermarks.
 */
static void
brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
			     u8 if_id)
{
	struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1];

	if (WARN_ON(!ifp))
		return;

	if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
	    pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
		brcmf_txflowblock_if(ifp,
				     BRCMF_NETIF_STOP_REASON_FWS_FC, false);
	if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
	    pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
		fws->stats.fws_flow_block++;
		brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
	}
	return;
}

/* Handle an RSSI TLV from firmware; currently only logged. */
static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
{
	brcmf_dbg(CTL, "rssi %d\n", rssi);
	return 0;
}

/* Handle MACDESC_ADD/DEL TLVs: create, delete or relocate the node
 * descriptor identified by the firmware-assigned mac handle.
 */
static int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type,
				      u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry, *existing;
	u8 mac_handle;
	u8 ifidx;
	u8 *addr;

	mac_handle = *data++;
	ifidx = *data++;
	addr = data;

	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
		if (entry->occupied) {
			brcmf_dbg(TRACE, "deleting %s mac %pM\n",
				  entry->name, addr);
			brcmf_fws_lock(fws);
			brcmf_fws_macdesc_cleanup(fws, entry, -1);
			brcmf_fws_macdesc_deinit(entry);
			brcmf_fws_unlock(fws);
		} else
			fws->stats.mac_update_failed++;
		return 0;
	}

	existing = brcmf_fws_macdesc_lookup(fws, addr);
	if (IS_ERR(existing)) {
		if (!entry->occupied) {
			brcmf_fws_lock(fws);
			entry->mac_handle = mac_handle;
			brcmf_fws_macdesc_init(entry, addr, ifidx);
			brcmf_fws_macdesc_set_name(fws, entry);
			brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
					BRCMF_FWS_PSQ_LEN);
			brcmf_fws_unlock(fws);
			brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
		} else {
			fws->stats.mac_update_failed++;
		}
	} else {
		if (entry != existing) {
			brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
			brcmf_fws_lock(fws);
			/* copy descriptor fields up to (excluding) psq */
			memcpy(entry, existing,
			       offsetof(struct brcmf_fws_mac_descriptor, psq));
			entry->mac_handle = mac_handle;
			brcmf_fws_macdesc_deinit(existing);
			brcmf_fws_macdesc_set_name(fws, entry);
			brcmf_fws_unlock(fws);
			brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
				  addr);
		} else {
			brcmf_dbg(TRACE, "use existing\n");
			WARN_ON(entry->mac_handle != mac_handle);
			/* TODO: what should we do here: continue, reinit, ..
			 */
		}
	}
	return 0;
}

/* Handle MAC_OPEN/MAC_CLOSE TLVs: update node state, wipe stale
 * firmware requests; closing also refreshes the TIM bitmaps.
 */
static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
					    u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;
	u8 mac_handle;
	int ret;

	mac_handle = data[0];
	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (!entry->occupied) {
		fws->stats.mac_ps_update_failed++;
		return -ESRCH;
	}
	brcmf_fws_lock(fws);
	/* a state update should wipe old credits */
	entry->requested_credit = 0;
	entry->requested_packet = 0;
	if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
		entry->state = BRCMF_FWS_STATE_OPEN;
		ret = BRCMF_FWS_RET_OK_SCHEDULE;
	} else {
		entry->state = BRCMF_FWS_STATE_CLOSE;
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
		ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
	}
	brcmf_fws_unlock(fws);
	return ret;
}

/* Handle INTERFACE_OPEN/CLOSE TLVs for interface descriptors. */
static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
					      u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;
	u8 ifidx;
	int ret;

	ifidx = data[0];

	if (ifidx >= BRCMF_MAX_IFS) {
		ret = -ERANGE;
		goto fail;
	}

	entry = &fws->desc.iface[ifidx];
	if (!entry->occupied) {
		ret = -ESRCH;
		goto fail;
	}

	brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
		  entry->name);
	brcmf_fws_lock(fws);
	switch (type) {
	case BRCMF_FWS_TYPE_INTERFACE_OPEN:
		entry->state = BRCMF_FWS_STATE_OPEN;
		ret = BRCMF_FWS_RET_OK_SCHEDULE;
		break;
	case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
		entry->state = BRCMF_FWS_STATE_CLOSE;
		ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
		break;
	default:
		ret = -EINVAL;
		brcmf_fws_unlock(fws);
		goto fail;
	}
	brcmf_fws_unlock(fws);
	return ret;

fail:
	fws->stats.if_update_failed++;
	return ret;
}

/* Handle MAC_REQUEST_CREDIT/PACKET TLVs: record how much traffic the
 * firmware wants delivered to a (closed) node.
 */
static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
				      u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;

	entry = &fws->desc.nodes[data[1] & 0x1F];
	if (!entry->occupied) {
		if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
			fws->stats.credit_request_failed++;
		else
			fws->stats.packet_request_failed++;
		return -ESRCH;
	}

	brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
		  brcmf_fws_get_tlv_name(type), type, entry->name,
		  data[0], data[2]);
	brcmf_fws_lock(fws);
	if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
		entry->requested_credit = data[0];
	else
		entry->requested_packet = data[0];

	entry->ac_bitmap = data[2];
	brcmf_fws_unlock(fws);
	return BRCMF_FWS_RET_OK_SCHEDULE;
}

/* Consume one firmware-requested credit or packet for @skb and tag the
 * skb with the REQUESTED/REQ_CREDIT flags accordingly.
 */
static void
brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry,
				 struct sk_buff *skb)
{
	if (entry->requested_credit > 0) {
		entry->requested_credit--;
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
		if (entry->state != BRCMF_FWS_STATE_CLOSE)
			brcmf_err("requested credit set while mac not closed!\n");
	} else if (entry->requested_packet > 0) {
		entry->requested_packet--;
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
		if (entry->state != BRCMF_FWS_STATE_CLOSE)
			brcmf_err("requested packet set while mac not closed!\n");
	} else {
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
	}
}

/* Return an unused firmware-requested credit taken for @skb. */
static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;

	if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
	    (entry->state == BRCMF_FWS_STATE_CLOSE))
		entry->requested_credit++;
}

/* Return @credits to @fifo. When @fifo is BE and credits were borrowed
 * from other ACs, repay the lenders first (highest AC first).
 */
static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
				     u8 fifo, u8 credits)
{
	int lender_ac;
	int *borrowed;
	int *fifo_credit;

	if (!credits)
		return;

	fws->fifo_credit_map |= 1 << fifo;

	if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
	    (fws->credits_borrowed[0])) {
		for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
		     lender_ac--) {
			borrowed = &fws->credits_borrowed[lender_ac];
			if (*borrowed) {
				fws->fifo_credit_map |= (1 << lender_ac);
				fifo_credit = &fws->fifo_credit[lender_ac];
				if (*borrowed >= credits) {
					*borrowed -= credits;
					*fifo_credit += credits;
					return;
				} else {
					credits -= *borrowed;
					*fifo_credit += *borrowed;
					*borrowed = 0;
				}
			}
		}
	}

	fws->fifo_credit[fifo] += credits;
}

/* Kick the dequeue worker when delayed traffic can make progress. */
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
{
	/* only schedule dequeue when there are credits for delayed traffic */
	if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
	    (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
		queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}

/* Queue @p on its node's psq, in the delay or suppress precedence of
 * @fifo. Suppressed packets are inserted sorted on their free-running
 * sequence number (handling 8-bit wrap) to preserve in-order delivery.
 * Also refreshes the TIM bitmap and netif flow control.
 */
static int brcmf_fws_enq(struct brcmf_fws_info *fws,
			 enum brcmf_fws_skb_state state, int fifo,
			 struct sk_buff *p)
{
	int prec = 2 * fifo;
	u32 *qfull_stat = &fws->stats.delayq_full_error;
	struct brcmf_fws_mac_descriptor *entry;
	struct pktq *pq;
	struct sk_buff_head *queue;
	struct sk_buff *p_head;
	struct sk_buff *p_tail;
	u32 fr_new;
	u32 fr_compare;

	entry = brcmf_skbcb(p)->mac;
	if (entry == NULL) {
		brcmf_err("no mac descriptor found for skb %p\n", p);
		return -ENOENT;
	}

	brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
	if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
		prec += 1;
		qfull_stat = &fws->stats.supprq_full_error;

		/* Fix out of order delivery of frames. Dont assume frame    */
		/* can be inserted at the end, but look for correct position */
		pq = &entry->psq;
		if (pktq_full(pq) || pktq_pfull(pq, prec)) {
			*qfull_stat += 1;
			return -ENFILE;
		}
		queue = &pq->q[prec].skblist;

		p_head = skb_peek(queue);
		p_tail = skb_peek_tail(queue);
		fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);

		while (p_head != p_tail) {
			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
								  FREERUN);
			/* be sure to handle wrap of 256 */
			if (((fr_new > fr_compare) &&
			     ((fr_new - fr_compare) < 128)) ||
			    ((fr_new < fr_compare) &&
			     ((fr_compare - fr_new) > 128)))
				break;

			p_tail = skb_queue_prev(queue, p_tail);
		}
		/* Position found. Determine what to do */
		if (p_tail == NULL) {
			/* empty list */
			__skb_queue_tail(queue, p);
		} else {
			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
								  FREERUN);
			if (((fr_new > fr_compare) &&
			     ((fr_new - fr_compare) < 128)) ||
			    ((fr_new < fr_compare) &&
			     ((fr_compare - fr_new) > 128))) {
				/* After tail */
				__skb_queue_after(queue, p_tail, p);
			} else {
				/* Before tail */
				__skb_insert(p, p_tail->prev, p_tail, queue);
			}
		}

		/* Complete the counters and statistics */
		pq->len++;
		if (pq->hi_prec < prec)
			pq->hi_prec = (u8) prec;
	} else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
		*qfull_stat += 1;
		return -ENFILE;
	}

	/* increment total enqueued packet count */
	fws->fifo_delay_map |= 1 << fifo;
	fws->fifo_enqpkt[fifo]++;

	/* update the sk_buff state */
	brcmf_skbcb(p)->state = state;

	/*
	 * A packet has been pushed so update traffic
	 * availability bitmap, if applicable
	 */
	brcmf_fws_tim_update(fws, entry, fifo, true);
	brcmf_fws_flow_control_check(fws, &entry->psq,
				     brcmf_skb_if_flags_get_field(p, INDEX));
	return 0;
}

/* Round-robin dequeue of the next transmittable packet for @fifo over
 * all mac descriptors. Delayed packets are preferred; the suppress
 * queue of an entry is only drained once all its suppressed in-transit
 * packets have completed.
 */
static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
{
	struct brcmf_fws_mac_descriptor *table;
	struct brcmf_fws_mac_descriptor *entry;
	struct sk_buff *p;
	int num_nodes;
	int node_pos;
	int prec_out;
	int pmsk;
	int i;

	table = (struct brcmf_fws_mac_descriptor *)&fws->desc;
	num_nodes = sizeof(fws->desc) / sizeof(struct brcmf_fws_mac_descriptor);
	node_pos = fws->deq_node_pos[fifo];

	for (i = 0; i < num_nodes; i++) {
		entry = &table[(node_pos + i) % num_nodes];
		if (!entry->occupied ||
		    brcmf_fws_macdesc_closed(fws, entry, fifo))
			continue;

		if (entry->suppressed)
			pmsk = 2;
		else
			pmsk = 3;
		p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
		if (p == NULL) {
			if (entry->suppressed) {
				if (entry->suppr_transit_count)
					continue;
				entry->suppressed = false;
				p = brcmu_pktq_mdeq(&entry->psq,
						    1 << (fifo * 2), &prec_out);
			}
		}
		if (p == NULL)
			continue;

		brcmf_fws_macdesc_use_req_credit(entry, p);

		/* move dequeue position to ensure fair round-robin */
		fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
		brcmf_fws_flow_control_check(fws, &entry->psq,
					     brcmf_skb_if_flags_get_field(p,
									  INDEX)
					     );
		/*
		 * A packet has been picked up, update traffic
		 * availability bitmap, if applicable
		 */
		brcmf_fws_tim_update(fws, entry, fifo, false);

		/*
		 * decrement total enqueued fifo packets and
		 * clear delay bitmap if done.
		 */
		fws->fifo_enqpkt[fifo]--;
		if (fws->fifo_enqpkt[fifo] == 0)
			fws->fifo_delay_map &= ~(1 << fifo);
		goto done;
	}
	p = NULL;
done:
	brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
	return p;
}

/* Re-queue a firmware-suppressed packet on the suppress queue, updating
 * its generation bit and sequence. When the suppress queue is full the
 * packet is dropped (its hanger slot is released); otherwise the slot
 * is marked suppressed so wlfc cleanup does not double-free the skb.
 */
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
					 struct sk_buff *skb,
					 u8 ifidx,
					 u32 genbit, u16 seq)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	u32 hslot;
	int ret;

	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);

	/* this packet was suppressed */
	if (!entry->suppressed) {
		entry->suppressed = true;
		entry->suppr_transit_count = entry->transit_count;
		brcmf_dbg(DATA, "suppress %s: transit %d\n",
			  entry->name, entry->transit_count);
	}

	entry->generation = genbit;

	brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
	brcmf_skbcb(skb)->htod_seq = seq;
	if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
		brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
	} else {
		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
	}
	ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);

	if (ret != 0) {
		/* suppress q is full drop this packet */
		brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
	} else {
		/* Mark suppressed to avoid a double free during wlfc cleanup */
		brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
	}

	return ret;
}

/* Process one txstatus for hanger slot @hslot: update statistics and
 * credits, then either finalize the packet or re-queue it when the
 * firmware suppressed it.
 */
static int
brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
		      u32 genbit, u16 seq)
{
	u32 fifo;
	int ret;
	bool remove_from_hanger = true;
	struct sk_buff *skb;
	struct brcmf_skbuff_cb *skcb;
	struct brcmf_fws_mac_descriptor *entry = NULL;
	u8 ifidx;

	brcmf_dbg(DATA, "flags %d\n", flags);

	if (flags ==
	    BRCMF_FWS_TXSTATUS_DISCARD)
		fws->stats.txs_discard++;
	else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) {
		fws->stats.txs_supp_core++;
		remove_from_hanger = false;
	} else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
		fws->stats.txs_supp_ps++;
		remove_from_hanger = false;
	} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
		fws->stats.txs_tossed++;
	else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
		fws->stats.txs_host_tossed++;
	else
		brcmf_err("unexpected txstatus\n");

	ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
				      remove_from_hanger);
	if (ret != 0) {
		brcmf_err("no packet in hanger slot: hslot=%d\n", hslot);
		return ret;
	}

	skcb = brcmf_skbcb(skb);
	entry = skcb->mac;
	if (WARN_ON(!entry)) {
		brcmu_pkt_buf_free_skb(skb);
		return -EINVAL;
	}
	entry->transit_count--;
	if (entry->suppressed && entry->suppr_transit_count)
		entry->suppr_transit_count--;

	brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
		  skcb->htod, seq);

	/* pick up the implicit credit from this packet */
	fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
	if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
	    (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
	    (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
		brcmf_fws_return_credits(fws, fifo, 1);
		brcmf_fws_schedule_deq(fws);
	}
	brcmf_fws_macdesc_return_req_credit(skb);

	if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
		brcmu_pkt_buf_free_skb(skb);
		return -EINVAL;
	}
	if (!remove_from_hanger)
		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
						    genbit, seq);

	if (remove_from_hanger || ret)
		brcmf_txfinalize(fws->drvr, skb, ifidx, true);

	return 0;
}

/* Handle FIFO_CREDITBACK TLV: return per-fifo credits granted by the
 * firmware; only meaningful in explicit-credit mode.
 */
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
					     u8 *data)
{
	int i;

	if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
		brcmf_dbg(INFO, "ignored\n");
		return BRCMF_FWS_RET_OK_NOSCHEDULE;
	}

	brcmf_dbg(DATA, "enter: data %pM\n", data);
	brcmf_fws_lock(fws);
	for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
		brcmf_fws_return_credits(fws, i, data[i]);

	brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
		  fws->fifo_delay_map);
	brcmf_fws_unlock(fws);
	return BRCMF_FWS_RET_OK_SCHEDULE;
}

/* Handle TXSTATUS TLV: decode the little-endian status word (and the
 * sequence when in reuse-seq mode), then process the txstatus.
 */
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
	__le32 status_le;
	__le16 seq_le;
	u32 status;
	u32 hslot;
	u32 genbit;
	u8 flags;
	u16 seq;

	fws->stats.txs_indicate++;
	memcpy(&status_le, data, sizeof(status_le));
	status = le32_to_cpu(status_le);
	flags = brcmf_txstatus_get_field(status, FLAGS);
	hslot = brcmf_txstatus_get_field(status, HSLOT);
	genbit = brcmf_txstatus_get_field(status, GENERATION);
	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
		memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
		       sizeof(seq_le));
		seq = le16_to_cpu(seq_le);
	} else {
		seq = 0;
	}

	brcmf_fws_lock(fws);
	brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
	brcmf_fws_unlock(fws);
	return BRCMF_FWS_RET_OK_NOSCHEDULE;
}

/* Handle TRANS_ID TLV: log firmware sequence/timestamp for debugging. */
static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
{
	__le32 timestamp;

	memcpy(&timestamp, &data[2], sizeof(timestamp));
	brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1],
		  le32_to_cpu(timestamp));
	return 0;
}

/* Event handler: initial per-fifo credit map from firmware; only the
 * first such event is applied.
 */
static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
				       const struct brcmf_event_msg *e,
				       void *data)
{
	struct brcmf_fws_info *fws = ifp->drvr->fws;
	int i;
	u8 *credits = data;

	if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
		brcmf_err("event payload too small (%d)\n", e->datalen);
		return -EINVAL;
	}
	if (fws->creditmap_received)
		return 0;

	fws->creditmap_received = true;

	brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
	brcmf_fws_lock(fws);
	for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
		if (*credits)
			fws->fifo_credit_map |= 1 << i;
		else
			fws->fifo_credit_map &= ~(1 << i);
		fws->fifo_credit[i] = *credits++;
	}
	brcmf_fws_schedule_deq(fws);
	brcmf_fws_unlock(fws);
	return 0;
}

/* Event handler: firmware supports bcmc credit accounting.
 * NOTE(review): fws is dereferenced by brcmf_fws_lock() before the
 * NULL check below, making the check ineffective -- confirm that fws
 * can never be NULL when this event fires, or move the check first.
 */
static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
						const struct brcmf_event_msg *e,
						void *data)
{
	struct brcmf_fws_info *fws = ifp->drvr->fws;

	brcmf_fws_lock(fws);
	if (fws)
		fws->bcmc_credit_check = true;
	brcmf_fws_unlock(fws);
	return 0;
}

/* Parse the signal TLVs that firmware prepends to a received packet,
 * dispatch each TLV to its handler and strip the signal data from the
 * skb; may leave a zero-length (signal-only) packet behind.
 */
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
		      struct sk_buff *skb)
{
	struct brcmf_skb_reorder_data *rd;
	struct brcmf_fws_info *fws = drvr->fws;
	u8 *signal_data;
	s16 data_len;
	u8 type;
	u8 len;
	u8 *data;
	s32 status;
	s32 err;

	brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
		  ifidx, skb->len, signal_len);

	WARN_ON(signal_len > skb->len);

	if (!signal_len)
		return 0;
	/* if flow control disabled, skip to packet data and leave */
	if ((!fws) || (!fws->fw_signals)) {
		skb_pull(skb, signal_len);
		return 0;
	}

	fws->stats.header_pulls++;
	data_len = signal_len;
	signal_data = skb->data;

	status = BRCMF_FWS_RET_OK_NOSCHEDULE;

	while (data_len > 0) {
		/* extract tlv info */
		type = signal_data[0];

		/* FILLER type is actually not a TLV, but
		 * a single byte that can be skipped.
		 */
		if (type == BRCMF_FWS_TYPE_FILLER) {
			signal_data += 1;
			data_len -= 1;
			continue;
		}
		len = signal_data[1];
		data = signal_data + 2;

		brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n",
			  brcmf_fws_get_tlv_name(type), type, len,
			  brcmf_fws_get_tlv_len(fws, type));

		/* abort parsing when length invalid */
		if (data_len < len + 2)
			break;

		if (len < brcmf_fws_get_tlv_len(fws, type))
			break;

		err = BRCMF_FWS_RET_OK_NOSCHEDULE;
		switch (type) {
		case BRCMF_FWS_TYPE_COMP_TXSTATUS:
			break;
		case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
			rd = (struct brcmf_skb_reorder_data *)skb->cb;
			rd->reorder = data;
			break;
		case BRCMF_FWS_TYPE_MACDESC_ADD:
		case BRCMF_FWS_TYPE_MACDESC_DEL:
			brcmf_fws_macdesc_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_MAC_OPEN:
		case BRCMF_FWS_TYPE_MAC_CLOSE:
			err = brcmf_fws_macdesc_state_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_INTERFACE_OPEN:
		case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
			err = brcmf_fws_interface_state_indicate(fws, type,
								 data);
			break;
		case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
		case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
			err = brcmf_fws_request_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_TXSTATUS:
			brcmf_fws_txstatus_indicate(fws, data);
			break;
		case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
			err = brcmf_fws_fifocreditback_indicate(fws, data);
			break;
		case BRCMF_FWS_TYPE_RSSI:
			brcmf_fws_rssi_indicate(fws, *data);
			break;
		case BRCMF_FWS_TYPE_TRANS_ID:
			brcmf_fws_dbg_seqnum_check(fws, data);
			break;
		case BRCMF_FWS_TYPE_PKTTAG:
		case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
		default:
			fws->stats.tlv_invalid_type++;
			break;
		}
		if (err == BRCMF_FWS_RET_OK_SCHEDULE)
			status = BRCMF_FWS_RET_OK_SCHEDULE;
		signal_data += len + 2;
		data_len -= len + 2;
	}

	if (data_len != 0)
		fws->stats.tlv_parse_failed++;

	if (status == BRCMF_FWS_RET_OK_SCHEDULE)
		brcmf_fws_schedule_deq(fws);

	/* signalling processing result does
	 * not affect the actual ethernet packet.
	 */
	skb_pull(skb, signal_len);

	/* this may be a signal-only packet
	 */
	if (skb->len == 0)
		fws->stats.header_only_pkt++;

	return 0;
}

/* Set generation/flags fields in the htod tag and push the signal
 * header; returns the header size in 4-byte words.
 */
static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
				  struct sk_buff *p)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
	struct brcmf_fws_mac_descriptor *entry = skcb->mac;
	u8 flags;

	if (skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED)
		brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
	flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
	if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
		/*
		 * Indicate that this packet is being sent in response to an
		 * explicit request from the firmware side.
		 */
		flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
	}
	brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
	return brcmf_fws_hdrpush(fws, p);
}

/* Put a packet that failed bus transmit back at the head of its psq
 * (delay or suppress precedence); when that also fails, toss it through
 * the txstatus path so its hanger slot is released.
 */
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
				   struct sk_buff *skb, int fifo)
{
	struct brcmf_fws_mac_descriptor *entry;
	struct sk_buff *pktout;
	int qidx, hslot;
	int rc = 0;

	entry = brcmf_skbcb(skb)->mac;
	if (entry->occupied) {
		qidx = 2 * fifo;
		if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
			qidx++;

		pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
		if (pktout == NULL) {
			brcmf_err("%s queue %d full\n", entry->name, qidx);
			rc = -ENOSPC;
		}
	} else {
		brcmf_err("%s entry removed\n", entry->name);
		rc = -ENOENT;
	}

	if (rc) {
		fws->stats.rollback_failed++;
		hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
		brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
				      hslot, 0, 0);
	} else {
		fws->stats.rollback_success++;
		brcmf_fws_return_credits(fws, fifo, 1);
		brcmf_fws_macdesc_return_req_credit(skb);
	}
}

/* Borrow one credit for the BE fifo from the lowest AC that has any;
 * refused while inside the borrow-defer window.
 */
static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
{
	int lender_ac;

	if (time_after(fws->borrow_defer_timestamp, jiffies)) {
		fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
		return -ENAVAIL;
	}

	for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
		if (fws->fifo_credit[lender_ac]) {
			fws->credits_borrowed[lender_ac]++;
			fws->fifo_credit[lender_ac]--;
			if (fws->fifo_credit[lender_ac] == 0)
				fws->fifo_credit_map &= ~(1 << lender_ac);
			fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
			brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
			return 0;
		}
	}
	fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
	return -ENAVAIL;
}

/* Hand one packet to the bus for transmit; on failure roll it back to
 * its queue. Called with the fws lock held; the lock is dropped around
 * the bus transmit.
 */
static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
				struct sk_buff *skb)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
	struct brcmf_fws_mac_descriptor *entry;
	int rc;
	u8 ifidx;
	u8 data_offset;

	entry = skcb->mac;
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
	entry->transit_count++;
	if (entry->suppressed)
		entry->suppr_transit_count++;
	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
	brcmf_fws_unlock(fws);
	rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
	brcmf_fws_lock(fws);
	brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
		  skcb->if_flags, skcb->htod, rc);
	if (rc < 0) {
		entry->transit_count--;
		if (entry->suppressed)
			entry->suppr_transit_count--;
		brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
		goto rollback;
	}

	fws->stats.pkt2bus++;
	fws->stats.send_pkts[fifo]++;
	if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
		fws->stats.requested_sent[fifo]++;

	return rc;

rollback:
	brcmf_fws_rollback_toq(fws, skb, fifo);
	return rc;
}

/* Reserve a hanger slot for @p and initialize its htod tag with the
 * slot, free-running sequence and fifo.
 */
static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
				 int fifo)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
	int rc, hslot;

	skcb->htod = 0;
	skcb->htod_seq = 0;
	hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
	brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
	brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
	brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
	rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
	if (!rc)
		skcb->mac->seq[fifo]++;
	else
		fws->stats.generic_error++;
	return rc;
}

/* Transmit entry point: classify the packet, attach its mac descriptor,
 * reserve a hanger slot and queue it for the dequeue worker -- or send
 * directly when queueing is avoided.
 */
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_fws_info *fws = drvr->fws;
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	int fifo = BRCMF_FWS_FIFO_BCMC;
	bool multicast = is_multicast_ether_addr(eh->h_dest);
	int rc = 0;

	brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
	/* determine the priority */
	if (!skb->priority)
		skb->priority = cfg80211_classify8021d(skb, NULL);

	drvr->tx_multicast += !!multicast;

	if (fws->avoid_queueing) {
		rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
		if (rc < 0)
			brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
		return rc;
	}

	/* set control buffer information */
	skcb->if_flags = 0;
	skcb->state = BRCMF_FWS_SKBSTATE_NEW;
	brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
	if (!multicast)
		fifo = brcmf_fws_prio2fifo[skb->priority];

	brcmf_fws_lock(fws);
	if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
		fws->borrow_defer_timestamp = jiffies +
					      BRCMF_FWS_BORROW_DEFER_PERIOD;

	skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
	brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
		  eh->h_dest, multicast, fifo);
	if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
		brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
		brcmf_fws_schedule_deq(fws);
	} else {
		brcmf_err("drop skb: no hanger slot\n");
		brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
		rc = -ENOMEM;
	}
	brcmf_fws_unlock(fws);

	return rc;
}

/* Re-initialize the interface descriptor after an interface reset. */
void brcmf_fws_reset_interface(struct brcmf_if *ifp)
{
	struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;

	brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
	if (!entry)
		return;

	brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
}

/* Attach a firmware-signalling descriptor to a newly added interface. */
void brcmf_fws_add_interface(struct brcmf_if *ifp)
{
	struct brcmf_fws_info *fws = ifp->drvr->fws;
	struct brcmf_fws_mac_descriptor *entry;

	if (!ifp->ndev)
		return;

	entry = &fws->desc.iface[ifp->ifidx];
	ifp->fws_desc = entry;
	brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
	brcmf_fws_macdesc_set_name(fws, entry);
	brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
			BRCMF_FWS_PSQ_LEN);
	brcmf_dbg(TRACE, "added %s\n", entry->name);
}

/* Detach and clean up an interface's descriptor when it is removed. */
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
	struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;

	if (!entry)
		return;

	brcmf_fws_lock(ifp->drvr->fws);
	ifp->fws_desc = NULL;
	brcmf_dbg(TRACE, "deleting %s\n", entry->name);
	brcmf_fws_macdesc_deinit(entry);
	brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
	brcmf_fws_unlock(ifp->drvr->fws);
}

/* Workqueue worker draining delayed queues to the bus.
 * (Definition continues beyond this chunk.)
 */
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
	struct brcmf_fws_info *fws;
	struct brcmf_pub *drvr;
	struct sk_buff *skb;
	int fifo;
	u32 hslot;
	u32 ifidx;
	int ret;

	fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
	drvr =
fws->drvr; brcmf_fws_lock(fws); for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked; fifo--) { if (!brcmf_fws_fc_active(fws)) { while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) { hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); ifidx = brcmf_skb_if_flags_get_field(skb, INDEX); /* Use proto layer to send data frame */ brcmf_fws_unlock(fws); ret = brcmf_proto_txdata(drvr, ifidx, 0, skb); brcmf_fws_lock(fws); if (ret < 0) brcmf_txfinalize(drvr, skb, ifidx, false); if (fws->bus_flow_blocked) break; } continue; } while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) && (fifo == BRCMF_FWS_FIFO_BCMC))) { skb = brcmf_fws_deq(fws, fifo); if (!skb) break; fws->fifo_credit[fifo]--; if (brcmf_fws_commit_skb(fws, fifo, skb)) break; if (fws->bus_flow_blocked) break; } if ((fifo == BRCMF_FWS_FIFO_AC_BE) && (fws->fifo_credit[fifo] == 0) && (!fws->bus_flow_blocked)) { while (brcmf_fws_borrow_credit(fws) == 0) { skb = brcmf_fws_deq(fws, fifo); if (!skb) { brcmf_fws_return_credits(fws, fifo, 1); break; } if (brcmf_fws_commit_skb(fws, fifo, skb)) break; if (fws->bus_flow_blocked) break; } } } brcmf_fws_unlock(fws); } #ifdef DEBUG static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data) { struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats; seq_printf(seq, "header_pulls: %u\n" "header_only_pkt: %u\n" "tlv_parse_failed: %u\n" "tlv_invalid_type: %u\n" "mac_update_fails: %u\n" "ps_update_fails: %u\n" "if_update_fails: %u\n" "pkt2bus: %u\n" "generic_error: %u\n" "rollback_success: %u\n" "rollback_failed: %u\n" "delayq_full: %u\n" "supprq_full: %u\n" "txs_indicate: %u\n" "txs_discard: %u\n" "txs_suppr_core: %u\n" "txs_suppr_ps: %u\n" "txs_tossed: %u\n" "txs_host_tossed: %u\n" "bus_flow_block: %u\n" "fws_flow_block: %u\n" "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n" "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n", 
fwstats->header_pulls, fwstats->header_only_pkt, fwstats->tlv_parse_failed, fwstats->tlv_invalid_type, fwstats->mac_update_failed, fwstats->mac_ps_update_failed, fwstats->if_update_failed, fwstats->pkt2bus, fwstats->generic_error, fwstats->rollback_success, fwstats->rollback_failed, fwstats->delayq_full_error, fwstats->supprq_full_error, fwstats->txs_indicate, fwstats->txs_discard, fwstats->txs_supp_core, fwstats->txs_supp_ps, fwstats->txs_tossed, fwstats->txs_host_tossed, fwstats->bus_flow_block, fwstats->fws_flow_block, fwstats->send_pkts[0], fwstats->send_pkts[1], fwstats->send_pkts[2], fwstats->send_pkts[3], fwstats->send_pkts[4], fwstats->requested_sent[0], fwstats->requested_sent[1], fwstats->requested_sent[2], fwstats->requested_sent[3], fwstats->requested_sent[4]); return 0; } #else static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data) { return 0; } #endif int brcmf_fws_init(struct brcmf_pub *drvr) { struct brcmf_fws_info *fws; u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS; int rc; u32 mode; drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL); if (!drvr->fws) { rc = -ENOMEM; goto fail; } fws = drvr->fws; spin_lock_init(&fws->spinlock); /* set linkage back */ fws->drvr = drvr; fws->fcmode = fcmode; if ((drvr->bus_if->always_use_fws_queue == false) && (fcmode == BRCMF_FWS_FCMODE_NONE)) { fws->avoid_queueing = true; brcmf_dbg(INFO, "FWS queueing will be avoided\n"); return 0; } fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); if (fws->fws_wq == NULL) { brcmf_err("workqueue creation failed\n"); rc = -EBADF; goto fail; } INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker); /* enable firmware signalling if fcmode active */ if (fws->fcmode != BRCMF_FWS_FCMODE_NONE) tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS | BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS | BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE | BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE; rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP, brcmf_fws_notify_credit_map); if (rc < 0) { 
brcmf_err("register credit map handler failed\n"); goto fail; } rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT, brcmf_fws_notify_bcmc_credit_support); if (rc < 0) { brcmf_err("register bcmc credit handler failed\n"); brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); goto fail; } /* Setting the iovar may fail if feature is unsupported * so leave the rc as is so driver initialization can * continue. Set mode back to none indicating not enabled. */ fws->fw_signals = true; if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) { brcmf_err("failed to set bdcv2 tlv signaling\n"); fws->fcmode = BRCMF_FWS_FCMODE_NONE; fws->fw_signals = false; } if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1)) brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n"); /* Enable seq number reuse, if supported */ if (brcmf_fil_iovar_int_get(drvr->iflist[0], "wlfc_mode", &mode) == 0) { if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) { mode = 0; BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1); if (brcmf_fil_iovar_int_set(drvr->iflist[0], "wlfc_mode", mode) == 0) { BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1); } } } brcmf_fws_hanger_init(&fws->hanger); brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0); brcmf_fws_macdesc_set_name(fws, &fws->desc.other); brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); /* create debugfs file for statistics */ brcmf_debugfs_add_entry(drvr, "fws_stats", brcmf_debugfs_fws_stats_read); brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", fws->fw_signals ? 
"enabled" : "disabled", tlv);
	return 0;

fail:
	brcmf_fws_deinit(drvr);
	return rc;
}

/*
 * Tear down firmware-signalling state: destroy the dequeue workqueue,
 * flush all queued packets via brcmf_fws_cleanup(), detach drvr->fws and
 * free the top-level structure.  Safe to call when fws was never allocated
 * (early brcmf_fws_init() failure path).
 */
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
	struct brcmf_fws_info *fws = drvr->fws;

	if (!fws)
		return;

	if (drvr->fws->fws_wq)
		destroy_workqueue(drvr->fws->fws_wq);

	/* cleanup */
	brcmf_fws_lock(fws);
	brcmf_fws_cleanup(fws, -1);
	drvr->fws = NULL;
	brcmf_fws_unlock(fws);

	/* free top structure */
	kfree(fws);
}

/*
 * Flow control is considered active only once the firmware has reported
 * its credit map and fcmode is not NONE.
 */
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
	if (!fws->creditmap_received)
		return false;

	return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}

/*
 * Bus transmit failed for @skb: free TIM-state packets outright; for all
 * others report the hanger slot as HOST_TOSSED so the packet is accounted
 * for and released through the normal txstatus path.
 */
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	u32 hslot;

	if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}
	brcmf_fws_lock(fws);
	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
	brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0);
	brcmf_fws_unlock(fws);
}

/*
 * Bus flow-control notification: record the blocked state and kick the
 * dequeue worker as soon as the bus unblocks.
 */
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
{
	struct brcmf_fws_info *fws = drvr->fws;

	fws->bus_flow_blocked = flow_blocked;
	if (!flow_blocked)
		brcmf_fws_schedule_deq(fws);
	else
		fws->stats.bus_flow_block++;
}
gpl-2.0
huz123/bricked.tenderloin
fs/ext3/namei.c
562
68410
/* * linux/fs/ext3/namei.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * Directory entry file type support and forward compatibility hooks * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 * Hash Tree Directory indexing (c) * Daniel Phillips, 2001 * Hash Tree Directory indexing porting * Christopher Li, 2002 * Hash Tree Directory indexing cleanup * Theodore Ts'o, 2002 */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/jbd.h> #include <linux/time.h> #include <linux/ext3_fs.h> #include <linux/ext3_jbd.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/bio.h> #include "namei.h" #include "xattr.h" #include "acl.h" /* * define how far ahead to read directories while searching them. 
*/
#define NAMEI_RA_CHUNKS  2
#define NAMEI_RA_BLOCKS  4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))

/*
 * Grow directory @inode by one block.  On success, *block holds the
 * logical number of the new block and the returned buffer already has
 * journal write access; i_size and i_disksize are bumped by one block.
 * Returns NULL on failure with the error in *err.
 */
static struct buffer_head *ext3_append(handle_t *handle,
					struct inode *inode,
					u32 *block, int *err)
{
	struct buffer_head *bh;

	/* next free logical block is just past the current size */
	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;

	bh = ext3_bread(handle, inode, *block, 1, err);
	if (bh) {
		inode->i_size += inode->i_sb->s_blocksize;
		EXT3_I(inode)->i_disksize = inode->i_size;
		*err = ext3_journal_get_write_access(handle, bh);
		if (*err) {
			brelse(bh);
			bh = NULL;
		}
	}
	return bh;
}

#ifndef assert
#define assert(test) J_ASSERT(test)
#endif

#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif

/* On-disk directory entry header without the name. */
struct fake_dirent
{
	__le32 inode;
	__le16 rec_len;
	u8 name_len;
	u8 file_type;
};

/* Overlays the first dx_entry of an index block: entry count and limit. */
struct dx_countlimit
{
	__le16 limit;
	__le16 count;
};

/* One hash -> block mapping in the htree index. */
struct dx_entry
{
	__le32 hash;
	__le32 block;
};

/*
 * dx_root_info is laid out so that if it should somehow get overlaid by a
 * dirent the two low bits of the hash version will be zero. Therefore, the
 * hash version mod 4 should never be 0. Sincerely, the paranoia department.
*/
/* Layout of the first directory block when the htree index is in use:
 * fake "." and ".." entries, the index header, then the root dx_entry
 * array. */
struct dx_root
{
	struct fake_dirent dot;
	char dot_name[4];
	struct fake_dirent dotdot;
	char dotdot_name[4];
	struct dx_root_info
	{
		__le32 reserved_zero;
		u8 hash_version;
		u8 info_length; /* 8 */
		u8 indirect_levels;
		u8 unused_flags;
	}
	info;
	struct dx_entry	entries[0];
};

/* Interior index block: a fake dirent header followed by dx_entries. */
struct dx_node
{
	struct fake_dirent fake;
	struct dx_entry	entries[0];
};

/* One level of an in-memory htree lookup path. */
struct dx_frame
{
	struct buffer_head *bh;
	struct dx_entry *entries;
	struct dx_entry *at;
};

/* Hash/offset/size record used when splitting a directory block. */
struct dx_map_entry
{
	u32 hash;
	u16 offs;
	u16 size;
};

/* Forward declarations for the htree implementation below. */
static inline unsigned dx_get_block (struct dx_entry *entry);
static void dx_set_block (struct dx_entry *entry, unsigned value);
static inline unsigned dx_get_hash (struct dx_entry *entry);
static void dx_set_hash (struct dx_entry *entry, unsigned value);
static unsigned dx_get_count (struct dx_entry *entries);
static unsigned dx_get_limit (struct dx_entry *entries);
static void dx_set_count (struct dx_entry *entries, unsigned value);
static void dx_set_limit (struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
static unsigned dx_node_limit (struct inode *dir);
static struct dx_frame *dx_probe(struct qstr *entry,
				 struct inode *dir,
				 struct dx_hash_info *hinfo,
				 struct dx_frame *frame,
				 int *err);
static void dx_release (struct dx_frame *frames);
static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
			struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
		struct dx_map_entry *offsets, int count);
static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize);
static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash);
static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
			struct qstr *entry, struct
ext3_dir_entry_2 **res_dir, int *err);
static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode);

/*
 * p is at least 6 bytes before the end of page
 */
static inline struct ext3_dir_entry_2 *
ext3_next_entry(struct ext3_dir_entry_2 *p)
{
	/* advance by the on-disk record length of the current entry */
	return (struct ext3_dir_entry_2 *)((char *)p +
		ext3_rec_len_from_disk(p->rec_len));
}

/*
 * Future: use high four bits of block for coalesce-on-delete flags
 * Mask them off for now.
 */

/* Logical block an index entry points at (top byte masked off, see above). */
static inline unsigned dx_get_block (struct dx_entry *entry)
{
	return le32_to_cpu(entry->block) & 0x00ffffff;
}

static inline void dx_set_block (struct dx_entry *entry, unsigned value)
{
	entry->block = cpu_to_le32(value);
}

static inline unsigned dx_get_hash (struct dx_entry *entry)
{
	return le32_to_cpu(entry->hash);
}

static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
{
	entry->hash = cpu_to_le32(value);
}

/* count/limit live in a struct dx_countlimit overlaid on entries[0]. */
static inline unsigned dx_get_count (struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}

static inline unsigned dx_get_limit (struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}

static inline void dx_set_count (struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}

static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}

/* Max dx_entries in the root block: blocksize minus the "." and ".."
 * dirents and the dx_root_info header. */
static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
		EXT3_DIR_REC_LEN(2) - infosize;
	return entry_space / sizeof(struct dx_entry);
}

/* Max dx_entries in an interior node block. */
static inline unsigned dx_node_limit (struct inode *dir)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
	return entry_space / sizeof(struct dx_entry);
}

/*
 * Debug
 */
#ifdef DX_DEBUG
static void dx_show_index (char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk("%s index ", label);
for (i = 0; i < n; i++) { printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i)); } printk("\n"); } struct stats { unsigned names; unsigned space; unsigned bcount; }; static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de, int size, int show_names) { unsigned names = 0, space = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; printk("names: "); while ((char *) de < base + size) { if (de->inode) { if (show_names) { int len = de->name_len; char *name = de->name; while (len--) printk("%c", *name++); ext3fs_dirhash(de->name, de->name_len, &h); printk(":%x.%u ", h.hash, ((char *) de - base)); } space += EXT3_DIR_REC_LEN(de->name_len); names++; } de = ext3_next_entry(de); } printk("(%i)\n", names); return (struct stats) { names, space, 1 }; } struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, struct dx_entry *entries, int levels) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count = dx_get_count (entries), names = 0, space = 0, i; unsigned bcount = 0; struct buffer_head *bh; int err; printk("%i indexed blocks...\n", count); for (i = 0; i < count; i++, entries++) { u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0; u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; struct stats stats; printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0); names += stats.names; space += stats.space; bcount += stats.bcount; brelse (bh); } if (bcount) printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ", names, space/bcount,(space/bcount)*100/blocksize); return (struct stats) { names, space, bcount}; } #endif /* DX_DEBUG */ /* * Probe for a directory leaf block to search. 
* * dx_probe can return ERR_BAD_DX_DIR, which means there was a format * error in the directory index, and the caller should fall back to * searching the directory normally. The callers of dx_probe **MUST** * check for this error code, and make sure it never gets reflected * back to userspace. */ static struct dx_frame * dx_probe(struct qstr *entry, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) { unsigned count, indirect; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct buffer_head *bh; struct dx_frame *frame = frame_in; u32 hash; frame->bh = NULL; if (!(bh = ext3_bread (NULL,dir, 0, 0, err))) goto fail; root = (struct dx_root *) bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY) { ext3_warning(dir->i_sb, __func__, "Unrecognised inode hash code %d", root->info.hash_version); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed; if (entry) ext3fs_dirhash(entry->name, entry->len, hinfo); hash = hinfo->hash; if (root->info.unused_flags & 1) { ext3_warning(dir->i_sb, __func__, "Unimplemented inode hash flags: %#06x", root->info.unused_flags); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } if ((indirect = root->info.indirect_levels) > 1) { ext3_warning(dir->i_sb, __func__, "Unimplemented inode hash depth: %#06x", root->info.indirect_levels); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } entries = (struct dx_entry *) (((char *)&root->info) + root->info.info_length); if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext3_warning(dir->i_sb, __func__, "dx entry: limit != root limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } dxtrace (printk("Look up %x", hash)); while (1) { count = 
dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext3_warning(dir->i_sb, __func__, "dx entry: no count or count > limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } p = entries + 1; q = entries + count - 1; while (p <= q) { m = p + (q - p)/2; dxtrace(printk(".")); if (dx_get_hash(m) > hash) q = m - 1; else p = m + 1; } if (0) // linear search cross check { unsigned n = count - 1; at = entries; while (n--) { dxtrace(printk(",")); if (dx_get_hash(++at) > hash) { at--; break; } } assert (at == p - 1); } at = p - 1; dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at))); frame->bh = bh; frame->entries = entries; frame->at = at; if (!indirect--) return frame; if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err))) goto fail2; at = entries = ((struct dx_node *) bh->b_data)->entries; if (dx_get_limit(entries) != dx_node_limit (dir)) { ext3_warning(dir->i_sb, __func__, "dx entry: limit != node limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } frame++; frame->bh = NULL; } fail2: while (frame >= frame_in) { brelse(frame->bh); frame--; } fail: if (*err == ERR_BAD_DX_DIR) ext3_warning(dir->i_sb, __func__, "Corrupt dir inode %ld, running e2fsck is " "recommended.", dir->i_ino); return NULL; } static void dx_release (struct dx_frame *frames) { if (frames[0].bh == NULL) return; if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels) brelse(frames[1].bh); brelse(frames[0].bh); } /* * This function increments the frame pointer to search the next leaf * block, and reads in the necessary intervening nodes if the search * should be necessary. Whether or not the search is necessary is * controlled by the hash parameter. If the hash value is even, then * the search is only continued if the next block starts with that * hash value. This is used if we are searching for a specific file. * * If the hash value is HASH_NB_ALWAYS, then always go to the next block. 
* * This function returns 1 if the caller should continue to search, * or 0 if it should not. If there is an error reading one of the * index blocks, it will a negative error code. * * If start_hash is non-null, it will be filled in with the starting * hash of the next page. */ static int ext3_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash) { struct dx_frame *p; struct buffer_head *bh; int err, num_frames = 0; __u32 bhash; p = frame; /* * Find the next leaf page by incrementing the frame pointer. * If we run out of entries in the interior node, loop around and * increment pointer in the parent node. When we break out of * this loop, num_frames indicates the number of interior * nodes need to be read. */ while (1) { if (++(p->at) < p->entries + dx_get_count(p->entries)) break; if (p == frames) return 0; num_frames++; p--; } /* * If the hash is 1, then continue only if the next page has a * continuation hash of any value. This is used for readdir * handling. Otherwise, check to see if the hash matches the * desired contiuation hash. If it doesn't, return since * there's no point to read in the successive index pages. */ bhash = dx_get_hash(p->at); if (start_hash) *start_hash = bhash; if ((hash & 1) == 0) { if ((bhash & ~1) != hash) return 0; } /* * If the hash is HASH_NB_ALWAYS, we always go to the next * block so no check is necessary */ while (num_frames--) { if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at), 0, &err))) return err; /* Failure */ p++; brelse (p->bh); p->bh = bh; p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; } return 1; } /* * This function fills a red-black tree with information from a * directory block. It returns the number directory entries loaded * into the tree. If there is an error it is returned in err. 
*/ static int htree_dirblock_to_tree(struct file *dir_file, struct inode *dir, int block, struct dx_hash_info *hinfo, __u32 start_hash, __u32 start_minor_hash) { struct buffer_head *bh; struct ext3_dir_entry_2 *de, *top; int err, count = 0; dxtrace(printk("In htree dirblock_to_tree: block %d\n", block)); if (!(bh = ext3_bread (NULL, dir, block, 0, &err))) return err; de = (struct ext3_dir_entry_2 *) bh->b_data; top = (struct ext3_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0)); for (; de < top; de = ext3_next_entry(de)) { if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb)) +((char *)de - bh->b_data))) { /* On error, skip the f_pos to the next block. */ dir_file->f_pos = (dir_file->f_pos | (dir->i_sb->s_blocksize - 1)) + 1; brelse (bh); return count; } ext3fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) continue; if (de->inode == 0) continue; if ((err = ext3_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de)) != 0) { brelse(bh); return err; } count++; } brelse(bh); return count; } /* * This function fills a red-black tree with information from a * directory. We start scanning the directory in hash order, starting * at start_hash and start_minor_hash. * * This function returns the number of entries inserted into the tree, * or a negative error code. 
*/ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash) { struct dx_hash_info hinfo; struct ext3_dir_entry_2 *de; struct dx_frame frames[2], *frame; struct inode *dir; int block, err; int count = 0; int ret; __u32 hashval; dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = dir_file->f_path.dentry->d_inode; if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) { hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed; count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash); *next_hash = ~0; return count; } hinfo.hash = start_hash; hinfo.minor_hash = 0; frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err); if (!frame) return err; /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data; if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data; de = ext3_next_entry(de); if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0) goto errout; count++; } while (1) { block = dx_get_block(frame->at); ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, start_hash, start_minor_hash); if (ret < 0) { err = ret; goto errout; } count += ret; hashval = ~0; ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS, frame, frames, &hashval); *next_hash = hashval; if (ret < 0) { err = ret; goto errout; } /* * Stop if: (a) there are no more entries, or * (b) we have inserted at least one entry and the * next hash value is not a continuation */ if ((ret == 0) || (count && ((hashval & 1) == 0))) break; } dx_release(frames); dxtrace(printk("Fill tree: returned %d 
entries, next hash: %x\n", count, *next_hash)); return count; errout: dx_release(frames); return (err); } /* * Directory block splitting, compacting */ /* * Create map of hash values, offsets, and sizes, stored at end of block. * Returns number of entries mapped. */ static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) { int count = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { ext3fs_dirhash(de->name, de->name_len, &h); map_tail--; map_tail->hash = h.hash; map_tail->offs = (u16) ((char *) de - base); map_tail->size = le16_to_cpu(de->rec_len); count++; cond_resched(); } /* XXX: do we need to check rec_len == 0 case? -Chris */ de = ext3_next_entry(de); } return count; } /* Sort map by hash value */ static void dx_sort_map (struct dx_map_entry *map, unsigned count) { struct dx_map_entry *p, *q, *top = map + count - 1; int more; /* Combsort until bubble sort doesn't suck */ while (count > 2) { count = count*10/13; if (count - 9 < 2) /* 9, 10 -> 11 */ count = 11; for (p = top, q = p - count; q >= map; p--, q--) if (p->hash < q->hash) swap(*p, *q); } /* Garden variety bubble sort */ do { more = 0; q = top; while (q-- > map) { if (q[1].hash >= q[0].hash) continue; swap(*(q+1), *q); more = 1; } } while(more); } static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block) { struct dx_entry *entries = frame->entries; struct dx_entry *old = frame->at, *new = old + 1; int count = dx_get_count(entries); assert(count < dx_get_limit(entries)); assert(old < entries + count); memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); dx_set_hash(new, hash); dx_set_block(new, block); dx_set_count(entries, count + 1); } static void ext3_update_dx_flag(struct inode *inode) { if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb, EXT3_FEATURE_COMPAT_DIR_INDEX)) EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL; } /* * NOTE! 
unlike strncmp, ext3_match returns 1 for success, 0 for failure. * * `len <= EXT3_NAME_LEN' is guaranteed by caller. * `de != NULL' is guaranteed by caller. */ static inline int ext3_match (int len, const char * const name, struct ext3_dir_entry_2 * de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * Returns 0 if not found, -1 on failure, and 1 on success */ static inline int search_dirblock(struct buffer_head * bh, struct inode *dir, struct qstr *child, unsigned long offset, struct ext3_dir_entry_2 ** res_dir) { struct ext3_dir_entry_2 * de; char * dlimit; int de_len; const char *name = child->name; int namelen = child->len; de = (struct ext3_dir_entry_2 *) bh->b_data; dlimit = bh->b_data + dir->i_sb->s_blocksize; while ((char *) de < dlimit) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ if ((char *) de + namelen <= dlimit && ext3_match (namelen, name, de)) { /* found a match - just to be sure, do a full check */ if (!ext3_check_dir_entry("ext3_find_entry", dir, de, bh, offset)) return -1; *res_dir = de; return 1; } /* prevent looping on a bad block */ de_len = ext3_rec_len_from_disk(de->rec_len); if (de_len <= 0) return -1; offset += de_len; de = (struct ext3_dir_entry_2 *) ((char *) de + de_len); } return 0; } /* * ext3_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. * * The returned buffer_head has ->b_count elevated. The caller is expected * to brelse() it when appropriate. 
 */
static struct buffer_head *ext3_find_entry(struct inode *dir,
					struct qstr *entry,
					struct ext3_dir_entry_2 **res_dir)
{
	struct super_block * sb;
	struct buffer_head * bh_use[NAMEI_RA_SIZE];
	struct buffer_head * bh, *ret = NULL;
	unsigned long start, block, b;
	int ra_max = 0;		/* Number of bh's in the readahead buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead buffer */
	int num = 0;
	int nblocks, i, err;
	int namelen;

	*res_dir = NULL;
	sb = dir->i_sb;
	namelen = entry->len;
	if (namelen > EXT3_NAME_LEN)
		return NULL;
	if (is_dx(dir)) {
		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
		/*
		 * On success, or if the error was file not found,
		 * return.  Otherwise, fall back to doing a search the
		 * old fashioned way.
		 */
		if (bh || (err != ERR_BAD_DX_DIR))
			return bh;
		dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
	}
	/* linear scan, starting at the block where the last lookup hit */
	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
	start = EXT3_I(dir)->i_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;
restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;
				bh = ext3_getblk(NULL, dir, b++, 0, &err);
				bh_use[ra_max] = bh;
				if (bh)
					ll_rw_block(READ_META, 1, &bh);
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
			ext3_error(sb, __func__, "reading directory #%lu "
				   "offset %lu", dir->i_ino, block);
			brelse(bh);
			goto next;
		}
		i = search_dirblock(bh, dir, entry,
			    block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
		if (i == 1) {
			/* cache the hit so the next lookup starts here */
			EXT3_I(dir)->i_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse (bh_use[ra_ptr]);
	return ret;
}

/*
 * ext3_dx_find_entry - htree (indexed) lookup.
 *
 * Probes the hash index for @entry and scans the leaf block(s) with the
 * matching hash.  Same contract as ext3_find_entry(): returns the buffer
 * holding the entry (b_count elevated) with *res_dir pointing into it,
 * or NULL with *err set (-ENOENT, ERR_BAD_DX_DIR, or a read error).
 */
static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
			int *err)
{
	struct super_block * sb;
	struct dx_hash_info	hinfo;
	u32 hash;
	struct dx_frame frames[2], *frame;
	struct ext3_dir_entry_2 *de, *top;
	struct buffer_head *bh;
	unsigned long block;
	int retval;
	int namelen = entry->len;
	const u8 *name = entry->name;

	sb = dir->i_sb;
	/* NFS may look up ".." - look at dx_root directory block */
	if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
		if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
			return NULL;
	} else {
		/* "." and ".." live in the dx_root block itself (block 0) */
		frame = frames;
		frame->bh = NULL;			/* for dx_release() */
		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
	}
	hash = hinfo.hash;
	do {
		block = dx_get_block(frame->at);
		if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
			goto errout;
		de = (struct ext3_dir_entry_2 *) bh->b_data;
		top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
				       EXT3_DIR_REC_LEN(0));
		for (; de < top; de = ext3_next_entry(de)) {
			int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
				  + ((char *) de - bh->b_data);

			if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
				brelse(bh);
				*err = ERR_BAD_DX_DIR;
				goto errout;
			}

			if (ext3_match(namelen, name, de)) {
				*res_dir = de;
				dx_release(frames);
				return bh;
			}
		}
		brelse (bh);
		/* Check to see if we should continue to search */
		retval = ext3_htree_next_block(dir, hash, frame,
					       frames, NULL);
		if (retval < 0) {
			ext3_warning(sb, __func__,
			     "error reading index page in directory #%lu",
			     dir->i_ino);
			*err = retval;
			goto errout;
		}
	} while (retval == 1);

	*err = -ENOENT;
errout:
	dxtrace(printk("%s not found\n", name));
	dx_release (frames);
	return NULL;
}

/*
 * ext3_lookup - the directory inode_operations ->lookup method.
 *
 * Finds the dir entry, validates and reads its inode, and splices the
 * dentry into the dcache.  Returns ERR_PTR on failure.
 */
static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
	struct inode * inode;
	struct ext3_dir_entry_2 * de;
	struct buffer_head * bh;

	if (dentry->d_name.len > EXT3_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	bh = ext3_find_entry(dir, &dentry->d_name, &de);
	inode = NULL;
	if (bh) {
		unsigned long ino = le32_to_cpu(de->inode);
		brelse (bh);
		if (!ext3_valid_inum(dir->i_sb, ino)) {
			ext3_error(dir->i_sb, "ext3_lookup",
				   "bad inode number: %lu", ino);
			return ERR_PTR(-EIO);
		}
		inode = ext3_iget(dir->i_sb, ino);
		if (unlikely(IS_ERR(inode))) {
			if (PTR_ERR(inode) == -ESTALE) {
				/* entry points at a deleted inode: fs corruption */
				ext3_error(dir->i_sb, __func__,
						"deleted inode referenced: %lu",
						ino);
				return ERR_PTR(-EIO);
			} else {
				return ERR_CAST(inode);
			}
		}
	}
	return d_splice_alias(inode, dentry);
}

/*
 * ext3_get_parent - export_operations helper: find the parent of @child
 * by looking up its ".." entry.  Used by NFS export code.
 */
struct dentry *ext3_get_parent(struct dentry *child)
{
	unsigned long ino;
	struct qstr dotdot = {.name = "..", .len = 2};
	struct ext3_dir_entry_2 * de;
	struct buffer_head *bh;

	bh = ext3_find_entry(child->d_inode, &dotdot, &de);
	if (!bh)
		return ERR_PTR(-ENOENT);
	ino = le32_to_cpu(de->inode);
	brelse(bh);

	if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
		ext3_error(child->d_inode->i_sb, "ext3_get_parent",
			   "bad inode number: %lu", ino);
		return ERR_PTR(-EIO);
	}

	return d_obtain_alias(ext3_iget(child->d_inode->i_sb, ino));
}

#define S_SHIFT 12
/* maps the S_IFMT bits of i_mode to the on-disk EXT3_FT_* file types */
static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= EXT3_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT3_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT3_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT3_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT3_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT3_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT3_FT_SYMLINK,
};

/*
 * Set the on-disk file_type of @de from @mode, but only if the
 * filesystem has the FILETYPE incompat feature.
 */
static inline void ext3_set_de_type(struct super_block *sb,
				struct ext3_dir_entry_2 *de,
				umode_t mode) {
	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}

/*
 * Move count entries from end of map between two memory locations.
 * Returns pointer to last entry moved.
 */
static struct ext3_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
{
	unsigned rec_len = 0;

	while (count--) {
		struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
		rec_len = EXT3_DIR_REC_LEN(de->name_len);
		memcpy (to, de, rec_len);
		((struct ext3_dir_entry_2 *) to)->rec_len =
				ext3_rec_len_to_disk(rec_len);
		/* mark the source slot deleted (inode 0) */
		de->inode = 0;
		map++;
		to += rec_len;
	}
	return (struct ext3_dir_entry_2 *) (to - rec_len);
}

/*
 * Compact each dir entry in the range to the minimal rec_len.
 * Returns pointer to last entry in range.
 */
static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize)
{
	struct ext3_dir_entry_2 *next, *to, *prev;
	struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *)base;
	unsigned rec_len = 0;

	prev = to = de;
	while ((char *)de < base + blocksize) {
		next = ext3_next_entry(de);
		/* skip deleted slots (inode 0); keep only live entries */
		if (de->inode && de->name_len) {
			rec_len = EXT3_DIR_REC_LEN(de->name_len);
			if (de > to)
				memmove(to, de, rec_len);
			to->rec_len = ext3_rec_len_to_disk(rec_len);
			prev = to;
			to = (struct ext3_dir_entry_2 *)
				(((char *) to) + rec_len);
		}
		de = next;
	}
	return prev;
}

/*
 * Split a full leaf block to make room for a new dir entry.
 * Allocate a new block, and move entries so that they are approx. equally full.
 * Returns pointer to de in block into which the new entry will be inserted.
 */
static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			struct buffer_head **bh,struct dx_frame *frame,
			struct dx_hash_info *hinfo, int *error)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count, continued;
	struct buffer_head *bh2;
	u32 newblock;
	u32 hash2;
	struct dx_map_entry *map;
	char *data1 = (*bh)->b_data, *data2;
	unsigned split, move, size;
	struct ext3_dir_entry_2 *de = NULL, *de2;
	int	err = 0, i;

	bh2 = ext3_append (handle, dir, &newblock, &err);
	if (!(bh2)) {
		brelse(*bh);
		*bh = NULL;
		goto errout;
	}

	BUFFER_TRACE(*bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, *bh);
	if (err)
		goto journal_error;

	BUFFER_TRACE(frame->bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, frame->bh);
	if (err)
		goto journal_error;

	data2 = bh2->b_data;

	/* create map in the end of data2 block */
	map = (struct dx_map_entry *) (data2 + blocksize);
	count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map (map, count);
	/* Split the existing block in the middle, size-wise */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {
		/* is more than half of this entry in 2nd half of the block? */
		if (size + map[i].size/2 > blocksize/2)
			break;
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	split = count - move;
	hash2 = map[split].hash;
	/* "continued" flags a hash collision straddling the split point */
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk("Split block %i at %x, %i/%i\n",
		dx_get_block(frame->at), hash2, split, count-split));

	/* Fancy dance to stay within two buffers */
	de2 = dx_move_dirents(data1, data2, map + split, count - split);
	de = dx_pack_dirents(data1,blocksize);
	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
	de2->rec_len = ext3_rec_len_to_disk(data2 + blocksize - (char *) de2);
	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));

	/* Which block gets the new entry? */
	if (hinfo->hash >= hash2)
	{
		swap(*bh, bh2);
		de = de2;
	}
	dx_insert_block (frame, hash2 + continued, newblock);
	err = ext3_journal_dirty_metadata (handle, bh2);
	if (err)
		goto journal_error;
	err = ext3_journal_dirty_metadata (handle, frame->bh);
	if (err)
		goto journal_error;
	brelse (bh2);
	dxtrace(dx_show_index ("frame", frame->entries));
	return de;

journal_error:
	brelse(*bh);
	brelse(bh2);
	*bh = NULL;
	ext3_std_error(dir->i_sb, err);
errout:
	*error = err;
	return NULL;
}

/*
 * Add a new entry into a directory (leaf) block.  If de is non-NULL,
 * it points to a directory entry which is guaranteed to be large
 * enough for new directory entry.  If de is NULL, then
 * add_dirent_to_buf will attempt search the directory block for
 * space.  It will return -ENOSPC if no space is available, and -EIO
 * and -EEXIST if directory entry already exists.
 *
 * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
 * all other cases bh is released.
 */
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
			     struct inode *inode, struct ext3_dir_entry_2 *de,
			     struct buffer_head * bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	unsigned long	offset = 0;
	unsigned short	reclen;
	int		nlen, rlen, err;
	char		*top;

	reclen = EXT3_DIR_REC_LEN(namelen);
	if (!de) {
		/* scan the block for a slot with enough slack space */
		de = (struct ext3_dir_entry_2 *)bh->b_data;
		top = bh->b_data + dir->i_sb->s_blocksize - reclen;
		while ((char *) de <= top) {
			if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
						  bh, offset)) {
				brelse (bh);
				return -EIO;
			}
			if (ext3_match (namelen, name, de)) {
				brelse (bh);
				return -EEXIST;
			}
			nlen = EXT3_DIR_REC_LEN(de->name_len);
			rlen = ext3_rec_len_from_disk(de->rec_len);
			/* deleted entry (inode 0): whole rec_len is free;
			 * live entry: only the tail beyond its own nlen */
			if ((de->inode? rlen - nlen: rlen) >= reclen)
				break;
			de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
			offset += rlen;
		}
		if ((char *) de > top)
			return -ENOSPC;
	}
	BUFFER_TRACE(bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh);
	if (err) {
		ext3_std_error(dir->i_sb, err);
		brelse(bh);
		return err;
	}

	/* By now the buffer is marked for journaling */
	nlen = EXT3_DIR_REC_LEN(de->name_len);
	rlen = ext3_rec_len_from_disk(de->rec_len);
	if (de->inode) {
		/* split a live entry: carve the new entry from its tail */
		struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
		de1->rec_len = ext3_rec_len_to_disk(rlen - nlen);
		de->rec_len = ext3_rec_len_to_disk(nlen);
		de = de1;
	}
	de->file_type = EXT3_FT_UNKNOWN;
	if (inode) {
		de->inode = cpu_to_le32(inode->i_ino);
		ext3_set_de_type(dir->i_sb, de, inode->i_mode);
	} else
		de->inode = 0;
	de->name_len = namelen;
	memcpy (de->name, name, namelen);
	/*
	 * XXX shouldn't update any times until successful
	 * completion of syscall, but too many callers depend
	 * on this.
	 *
	 * XXX similarly, too many callers depend on
	 * ext3_new_inode() setting the times, but error
	 * recovery deletes the inode, so the worst that can
	 * happen is that the times are slightly out of date
	 * and/or different from the directory change time.
	 */
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	ext3_update_dx_flag(dir);
	dir->i_version++;
	ext3_mark_inode_dirty(handle, dir);
	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh);
	if (err)
		ext3_std_error(dir->i_sb, err);
	brelse(bh);
	return 0;
}

/*
 * This converts a one block unindexed directory to a 3 block indexed
 * directory, and adds the dentry to the indexed directory.
 */
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
			    struct inode *inode, struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	struct buffer_head *bh2;
	struct dx_root	*root;
	struct dx_frame	frames[2], *frame;
	struct dx_entry *entries;
	struct ext3_dir_entry_2	*de, *de2;
	char		*data1, *top;
	unsigned	len;
	int		retval;
	unsigned	blocksize;
	struct dx_hash_info hinfo;
	u32		block;
	struct fake_dirent *fde;

	blocksize =  dir->i_sb->s_blocksize;
	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
	retval = ext3_journal_get_write_access(handle, bh);
	if (retval) {
		ext3_std_error(dir->i_sb, retval);
		brelse(bh);
		return retval;
	}
	root = (struct dx_root *) bh->b_data;

	/* The 0th block becomes the root, move the dirents out */
	fde = &root->dotdot;
	de = (struct ext3_dir_entry_2 *)((char *)fde +
			ext3_rec_len_from_disk(fde->rec_len));
	if ((char *) de >= (((char *) root) + blocksize)) {
		ext3_error(dir->i_sb, __func__,
			   "invalid rec_len for '..' in inode %lu",
			   dir->i_ino);
		brelse(bh);
		return -EIO;
	}
	len = ((char *) root) + blocksize - (char *) de;

	bh2 = ext3_append (handle, dir, &block, &retval);
	if (!(bh2)) {
		brelse(bh);
		return retval;
	}
	EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
	data1 = bh2->b_data;

	/* copy all entries after ".." into the new leaf block */
	memcpy (data1, de, len);
	de = (struct ext3_dir_entry_2 *) data1;
	top = data1 + len;
	while ((char *)(de2 = ext3_next_entry(de)) < top)
		de = de2;
	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
	/* Initialize the root; the dot dirents already exist */
	de = (struct ext3_dir_entry_2 *) (&root->dotdot);
	de->rec_len = ext3_rec_len_to_disk(blocksize - EXT3_DIR_REC_LEN(2));
	memset (&root->info, 0, sizeof(root->info));
	root->info.info_length = sizeof(root->info);
	root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
	entries = root->entries;
	dx_set_block (entries, 1);
	dx_set_count (entries, 1);
	dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));

	/* Initialize as for dx_probe */
	hinfo.hash_version = root->info.hash_version;
	if (hinfo.hash_version <= DX_HASH_TEA)
		hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
	ext3fs_dirhash(name, namelen, &hinfo);
	frame = frames;
	frame->entries = entries;
	frame->at = entries;
	frame->bh = bh;
	bh = bh2;
	/* split the (now full) leaf and insert the new entry there */
	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
	dx_release (frames);
	if (!(de))
		return retval;

	return add_dirent_to_buf(handle, dentry, inode, de, bh);
}

/*
 *	ext3_add_entry()
 *
 * adds a file entry to the specified directory, using the same
 * semantics as ext3_find_entry(). It returns NULL if it failed.
 *
 * NOTE!! The inode part of 'de' is left at 0 - which means you
 * may not sleep between calling this and putting something into
 * the entry, as someone else might have used it while you slept.
 */
static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
	struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	unsigned long offset;
	struct buffer_head * bh;
	struct ext3_dir_entry_2 *de;
	struct super_block * sb;
	int	retval;
	int	dx_fallback=0;
	unsigned blocksize;
	u32 block, blocks;

	sb = dir->i_sb;
	blocksize = sb->s_blocksize;
	if (!dentry->d_name.len)
		return -EINVAL;
	if (is_dx(dir)) {
		retval = ext3_dx_add_entry(handle, dentry, inode);
		if (!retval || (retval != ERR_BAD_DX_DIR))
			return retval;
		/* bad index: drop the INDEX flag and fall back to linear */
		EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
		dx_fallback++;
		ext3_mark_inode_dirty(handle, dir);
	}
	blocks = dir->i_size >> sb->s_blocksize_bits;
	for (block = 0, offset = 0; block < blocks; block++) {
		bh = ext3_bread(handle, dir, block, 0, &retval);
		if(!bh)
			return retval;
		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
		if (retval != -ENOSPC)
			return retval;

		/* single full block + DIR_INDEX feature: convert to htree */
		if (blocks == 1 && !dx_fallback &&
		    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
			return make_indexed_dir(handle, dentry, inode, bh);
		brelse(bh);
	}
	/* no room anywhere: append a fresh block holding one empty entry */
	bh = ext3_append(handle, dir, &block, &retval);
	if (!bh)
		return retval;
	de = (struct ext3_dir_entry_2 *) bh->b_data;
	de->inode = 0;
	de->rec_len = ext3_rec_len_to_disk(blocksize);
	return add_dirent_to_buf(handle, dentry, inode, de, bh);
}

/*
 * Returns 0 for success, or a negative error value
 */
static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode)
{
	struct dx_frame frames[2], *frame;
	struct dx_entry *entries, *at;
	struct dx_hash_info hinfo;
	struct buffer_head * bh;
	struct inode *dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct ext3_dir_entry_2 *de;
	int err;

	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
	if (!frame)
		return err;
	entries = frame->entries;
	at = frame->at;

	if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
		goto cleanup;

	BUFFER_TRACE(bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh);
	if (err)
		goto journal_error;

	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
	if (err != -ENOSPC) {
		/* add_dirent_to_buf released bh on every non-ENOSPC path */
		bh = NULL;
		goto cleanup;
	}

	/* Block full, should compress but for now just split */
	dxtrace(printk("using %u of %u node entries\n",
		       dx_get_count(entries), dx_get_limit(entries)));
	/* Need to split index? */
	if (dx_get_count(entries) == dx_get_limit(entries)) {
		u32 newblock;
		unsigned icount = dx_get_count(entries);
		int levels = frame - frames;
		struct dx_entry *entries2;
		struct dx_node *node2;
		struct buffer_head *bh2;

		if (levels && (dx_get_count(frames->entries) ==
			       dx_get_limit(frames->entries))) {
			/* htree supports only 2 levels; root is also full */
			ext3_warning(sb, __func__,
				     "Directory index full!");
			err = -ENOSPC;
			goto cleanup;
		}
		bh2 = ext3_append (handle, dir, &newblock, &err);
		if (!(bh2))
			goto cleanup;
		node2 = (struct dx_node *)(bh2->b_data);
		entries2 = node2->entries;
		node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
		node2->fake.inode = 0;
		BUFFER_TRACE(frame->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, frame->bh);
		if (err)
			goto journal_error;
		if (levels) {
			/* split an interior index node in half */
			unsigned icount1 = icount/2, icount2 = icount - icount1;
			unsigned hash2 = dx_get_hash(entries + icount1);
			dxtrace(printk("Split index %i/%i\n", icount1, icount2));

			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
			err = ext3_journal_get_write_access(handle,
							     frames[0].bh);
			if (err)
				goto journal_error;

			memcpy ((char *) entries2, (char *) (entries + icount1),
				icount2 * sizeof(struct dx_entry));
			dx_set_count (entries, icount1);
			dx_set_count (entries2, icount2);
			dx_set_limit (entries2, dx_node_limit(dir));

			/* Which index block gets the new entry? */
			if (at - entries >= icount1) {
				frame->at = at = at - entries - icount1 + entries2;
				frame->entries = entries = entries2;
				swap(frame->bh, bh2);
			}
			dx_insert_block (frames + 0, hash2, newblock);
			dxtrace(dx_show_index ("node", frames[1].entries));
			dxtrace(dx_show_index ("node",
			       ((struct dx_node *) bh2->b_data)->entries));
			err = ext3_journal_dirty_metadata(handle, bh2);
			if (err)
				goto journal_error;
			brelse (bh2);
		} else {
			/* root full: push it down, creating a second level */
			dxtrace(printk("Creating second level index...\n"));
			memcpy((char *) entries2, (char *) entries,
			       icount * sizeof(struct dx_entry));
			dx_set_limit(entries2, dx_node_limit(dir));

			/* Set up root */
			dx_set_count(entries, 1);
			dx_set_block(entries + 0, newblock);
			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;

			/* Add new access path frame */
			frame = frames + 1;
			frame->at = at = at - entries + entries2;
			frame->entries = entries = entries2;
			frame->bh = bh2;
			err = ext3_journal_get_write_access(handle,
							     frame->bh);
			if (err)
				goto journal_error;
		}
		/* NOTE(review): return value of this dirty_metadata is ignored,
		 * unlike the other journal calls above - confirm intent */
		ext3_journal_dirty_metadata(handle, frames[0].bh);
	}
	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
	if (!de)
		goto cleanup;
	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
	bh = NULL;
	goto cleanup;

journal_error:
	ext3_std_error(dir->i_sb, err);
cleanup:
	if (bh)
		brelse(bh);
	dx_release(frames);
	return err;
}

/*
 * ext3_delete_entry deletes a directory entry by merging it with the
 * previous entry
 */
static int ext3_delete_entry (handle_t *handle,
			      struct inode * dir,
			      struct ext3_dir_entry_2 * de_del,
			      struct buffer_head * bh)
{
	struct ext3_dir_entry_2 * de, * pde;
	int i;

	i = 0;
	pde = NULL;
	de = (struct ext3_dir_entry_2 *) bh->b_data;
	while (i < bh->b_size) {
		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
			return -EIO;
		if (de == de_del)  {
			BUFFER_TRACE(bh, "get_write_access");
			/* NOTE(review): get_write_access return value is not
			 * checked here - confirm against upstream ext3 */
			ext3_journal_get_write_access(handle, bh);
			if (pde)
				/* fold the deleted record into its predecessor */
				pde->rec_len = ext3_rec_len_to_disk(
					ext3_rec_len_from_disk(pde->rec_len) +
					ext3_rec_len_from_disk(de->rec_len));
			else
				/* first entry in block: just mark it unused */
				de->inode = 0;
			dir->i_version++;
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
			return 0;
		}
		i += ext3_rec_len_from_disk(de->rec_len);
		pde = de;
		de = ext3_next_entry(de);
	}
	return -ENOENT;
}

/*
 * ext3_add_nondir - link a freshly created (locked, nlink 1) inode into
 * @dentry's directory and instantiate the dentry.  On failure the
 * inode's link count is dropped and the inode is released.
 */
static int ext3_add_nondir(handle_t *handle,
		struct dentry *dentry, struct inode *inode)
{
	int err = ext3_add_entry(handle, dentry, inode);
	if (!err) {
		ext3_mark_inode_dirty(handle, inode);
		d_instantiate(dentry, inode);
		unlock_new_inode(inode);
		return 0;
	}
	drop_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	return err;
}

/*
 * By the time this is called, we already have created
 * the directory cache entry for the new file, but it
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate().
 */
static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
		struct nameidata *nd)
{
	handle_t *handle;
	struct inode * inode;
	int err, retries = 0;

	dquot_initialize(dir);

retry:
	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	inode = ext3_new_inode (handle, dir, mode);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext3_file_inode_operations;
		inode->i_fop = &ext3_file_operations;
		ext3_set_aops(inode);
		err = ext3_add_nondir(handle, dentry, inode);
	}
	ext3_journal_stop(handle);
	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * ext3_mknod - create a special file (device node, fifo, or socket).
 */
static int ext3_mknod (struct inode * dir, struct dentry *dentry,
			int mode, dev_t rdev)
{
	handle_t *handle;
	struct inode *inode;
	int err, retries = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	dquot_initialize(dir);

retry:
	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	inode = ext3_new_inode (handle, dir, mode);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT3_FS_XATTR
		inode->i_op = &ext3_special_inode_operations;
#endif
		err = ext3_add_nondir(handle, dentry, inode);
	}
	ext3_journal_stop(handle);
	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * ext3_mkdir - create a directory: allocate the inode, write its first
 * block containing "." and "..", then link it into the parent.
 */
static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
{
	handle_t *handle;
	struct inode * inode;
	struct buffer_head * dir_block;
	struct ext3_dir_entry_2 * de;
	int err, retries = 0;

	if (dir->i_nlink >= EXT3_LINK_MAX)
		return -EMLINK;

	dquot_initialize(dir);

retry:
	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	inode->i_op = &ext3_dir_inode_operations;
	inode->i_fop = &ext3_dir_operations;
	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
	dir_block = ext3_bread (handle, inode, 0, 1, &err);
	if (!dir_block) {
		drop_nlink(inode); /* is this nlink == 0? */
		unlock_new_inode(inode);
		ext3_mark_inode_dirty(handle, inode);
		iput (inode);
		goto out_stop;
	}
	BUFFER_TRACE(dir_block, "get_write_access");
	/* NOTE(review): get_write_access return not checked - verify */
	ext3_journal_get_write_access(handle, dir_block);
	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
	de->inode = cpu_to_le32(inode->i_ino);
	de->name_len = 1;
	de->rec_len = ext3_rec_len_to_disk(EXT3_DIR_REC_LEN(de->name_len));
	strcpy (de->name, ".");
	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
	de = ext3_next_entry(de);
	de->inode = cpu_to_le32(dir->i_ino);
	/* ".." takes the rest of the block */
	de->rec_len = ext3_rec_len_to_disk(inode->i_sb->s_blocksize -
					EXT3_DIR_REC_LEN(1));
	de->name_len = 2;
	strcpy (de->name, "..");
	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
	inode->i_nlink = 2;
	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
	ext3_journal_dirty_metadata(handle, dir_block);
	brelse (dir_block);
	ext3_mark_inode_dirty(handle, inode);
	err = ext3_add_entry (handle, dentry, inode);
	if (err) {
		inode->i_nlink = 0;
		unlock_new_inode(inode);
		ext3_mark_inode_dirty(handle, inode);
		iput (inode);
		goto out_stop;
	}
	inc_nlink(dir);
	ext3_update_dx_flag(dir);
	ext3_mark_inode_dirty(handle, dir);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
out_stop:
	ext3_journal_stop(handle);
	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
static int empty_dir (struct inode * inode)
{
	unsigned long offset;
	struct buffer_head * bh;
	struct ext3_dir_entry_2 * de, * de1;
	struct super_block * sb;
	int err = 0;

	sb = inode->i_sb;
	if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
	    !(bh = ext3_bread (NULL, inode, 0, 0, &err))) {
		if (err)
			ext3_error(inode->i_sb, __func__,
				   "error %d reading directory #%lu offset 0",
				   err, inode->i_ino);
		else
			ext3_warning(inode->i_sb, __func__,
				     "bad directory (dir #%lu) - no data block",
				     inode->i_ino);
		/* corrupt dirs report "empty" so rmdir can clean them up */
		return 1;
	}
	de = (struct ext3_dir_entry_2 *) bh->b_data;
	de1 = ext3_next_entry(de);
	if (le32_to_cpu(de->inode) != inode->i_ino ||
			!le32_to_cpu(de1->inode) ||
			strcmp (".", de->name) ||
			strcmp ("..", de1->name)) {
		ext3_warning (inode->i_sb, "empty_dir",
			      "bad directory (dir #%lu) - no `.' or `..'",
			      inode->i_ino);
		brelse (bh);
		return 1;
	}
	offset = ext3_rec_len_from_disk(de->rec_len) +
			ext3_rec_len_from_disk(de1->rec_len);
	de = ext3_next_entry(de1);
	while (offset < inode->i_size ) {
		if (!bh ||
			(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
			err = 0;
			brelse (bh);
			bh = ext3_bread (NULL, inode,
				offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err);
			if (!bh) {
				if (err)
					ext3_error(sb, __func__,
						   "error %d reading directory"
						   " #%lu offset %lu",
						   err, inode->i_ino, offset);
				offset += sb->s_blocksize;
				continue;
			}
			de = (struct ext3_dir_entry_2 *) bh->b_data;
		}
		if (!ext3_check_dir_entry("empty_dir", inode, de, bh, offset)) {
			/* bad entry: skip to the next block boundary */
			de = (struct ext3_dir_entry_2 *)(bh->b_data +
							 sb->s_blocksize);
			offset = (offset | (sb->s_blocksize - 1)) + 1;
			continue;
		}
		if (le32_to_cpu(de->inode)) {
			brelse (bh);
			return 0;
		}
		offset += ext3_rec_len_from_disk(de->rec_len);
		de = ext3_next_entry(de);
	}
	brelse (bh);
	return 1;
}

/* ext3_orphan_add() links an unlinked or truncated inode into a list of
 * such inodes, starting at the superblock, in case we crash before the
 * file is closed/deleted, or in case the inode truncate spans multiple
 * transactions and the last transaction is not recovered after a crash.
 *
 * At filesystem recovery time, we walk this list deleting unlinked
 * inodes and truncating linked inodes in ext3_orphan_cleanup().
 */
int ext3_orphan_add(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ext3_iloc iloc;
	int err = 0, rc;

	mutex_lock(&EXT3_SB(sb)->s_orphan_lock);
	if (!list_empty(&EXT3_I(inode)->i_orphan))
		goto out_unlock;

	/* Orphan handling is only valid for files with data blocks
	 * being truncated, or files being unlinked. */

	/* @@@ FIXME: Observation from aviro:
	 * I think I can trigger J_ASSERT in ext3_orphan_add().  We block
	 * here (on s_orphan_lock), so race with ext3_link() which might bump
	 * ->i_nlink. For, say it, character device. Not a regular file,
	 * not a directory, not a symlink and ->i_nlink > 0.
	 *
	 * tytso, 4/25/2009: I'm not sure how that could happen;
	 * shouldn't the fs core protect us from these sort of
	 * unlink()/link() races?
	 */
	J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);

	BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
	err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
	if (err)
		goto out_unlock;

	err = ext3_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_unlock;

	/* Insert this inode at the head of the on-disk orphan list... */
	NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
	EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
	if (!err)
		err = rc;

	/* Only add to the head of the in-memory list if all the
	 * previous operations succeeded.  If the orphan_add is going to
	 * fail (possibly taking the journal offline), we can't risk
	 * leaving the inode on the orphan list: stray orphan-list
	 * entries can cause panics at unmount time.
	 *
	 * This is safe: on error we're going to ignore the orphan list
	 * anyway on the next recovery. */
	if (!err)
		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);

	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
	jbd_debug(4, "orphan inode %lu will point to %d\n",
			inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
	mutex_unlock(&EXT3_SB(sb)->s_orphan_lock);
	ext3_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext3_orphan_del() removes an unlinked or truncated inode from the list
 * of such inodes stored on disk, because it is finally being cleaned up.
 */
int ext3_orphan_del(handle_t *handle, struct inode *inode)
{
	struct list_head *prev;
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct ext3_sb_info *sbi;
	unsigned long ino_next;
	struct ext3_iloc iloc;
	int err = 0;

	mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
	if (list_empty(&ei->i_orphan))
		goto out;

	ino_next = NEXT_ORPHAN(inode);
	prev = ei->i_orphan.prev;
	sbi = EXT3_SB(inode->i_sb);

	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);

	list_del_init(&ei->i_orphan);

	/* If we're on an error path, we may not have a valid
	 * transaction handle with which to update the orphan list on
	 * disk, but we still need to remove the inode from the linked
	 * list in memory. */
	if (!handle)
		goto out;

	err = ext3_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_err;

	if (prev == &sbi->s_orphan) {
		/* we were the list head: superblock must point at our next */
		jbd_debug(4, "superblock will point to %lu\n", ino_next);
		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
		err = ext3_journal_get_write_access(handle, sbi->s_sbh);
		if (err)
			goto out_brelse;
		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
		err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
	} else {
		/* splice our on-disk predecessor past us */
		struct ext3_iloc iloc2;
		struct inode *i_prev =
			&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;

		jbd_debug(4, "orphan inode %lu will point to %lu\n",
			  i_prev->i_ino, ino_next);
		err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
		if (err)
			goto out_brelse;
		NEXT_ORPHAN(i_prev) = ino_next;
		err = ext3_mark_iloc_dirty(handle, i_prev, &iloc2);
	}
	if (err)
		goto out_brelse;
	NEXT_ORPHAN(inode) = 0;
	err = ext3_mark_iloc_dirty(handle, inode, &iloc);

out_err:
	ext3_std_error(inode->i_sb, err);
out:
	mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
	return err;

out_brelse:
	brelse(iloc.bh);
	goto out_err;
}

/*
 * ext3_rmdir - remove an (empty) directory entry and orphan its inode.
 */
static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
{
	int retval;
	struct inode * inode;
	struct buffer_head * bh;
	struct ext3_dir_entry_2 * de;
	handle_t *handle;

	/* Initialize quotas before so that eventual writes go in
	 * separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	retval = -ENOENT;
	bh = ext3_find_entry(dir, &dentry->d_name, &de);
	if (!bh)
		goto end_rmdir;

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_rmdir;

	retval = -ENOTEMPTY;
	if (!empty_dir (inode))
		goto end_rmdir;

	retval = ext3_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_rmdir;
	if (inode->i_nlink != 2)
		ext3_warning (inode->i_sb, "ext3_rmdir",
			      "empty directory has nlink!=2 (%d)",
			      inode->i_nlink);
	inode->i_version++;
	clear_nlink(inode);
	/* There's no need to set i_disksize: the fact that i_nlink is
	 * zero will ensure that the right thing happens during any
	 * recovery. */
	inode->i_size = 0;
	ext3_orphan_add(handle, inode);
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);
	drop_nlink(dir);
	ext3_update_dx_flag(dir);
	ext3_mark_inode_dirty(handle, dir);

end_rmdir:
	ext3_journal_stop(handle);
	brelse (bh);
	return retval;
}

/*
 * ext3_unlink - remove a non-directory entry; orphan the inode if this
 * was its last link.
 */
static int ext3_unlink(struct inode * dir, struct dentry *dentry)
{
	int retval;
	struct inode * inode;
	struct buffer_head * bh;
	struct ext3_dir_entry_2 * de;
	handle_t *handle;

	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	retval = -ENOENT;
	bh = ext3_find_entry(dir, &dentry->d_name, &de);
	if (!bh)
		goto end_unlink;

	inode = dentry->d_inode;

	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_unlink;

	if (!inode->i_nlink) {
		/* repair an already-zero link count before dropping it */
		ext3_warning (inode->i_sb, "ext3_unlink",
			      "Deleting nonexistent file (%lu), %d",
			      inode->i_ino, inode->i_nlink);
		inode->i_nlink = 1;
	}
	retval = ext3_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_unlink;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
	ext3_update_dx_flag(dir);
	ext3_mark_inode_dirty(handle, dir);
	drop_nlink(inode);
	if (!inode->i_nlink)
		ext3_orphan_add(handle, inode);
	inode->i_ctime = dir->i_ctime;
	ext3_mark_inode_dirty(handle, inode);
	retval = 0;

end_unlink:
	ext3_journal_stop(handle);
	brelse (bh);
	return retval;
}

/*
 * ext3_symlink - create a symlink; short targets are stored inline in
 * i_data ("fast" symlinks), long ones get a data block via page_symlink.
 */
static int ext3_symlink (struct inode * dir,
		struct dentry *dentry, const char * symname)
{
	handle_t *handle;
	struct inode * inode;
	int l, err, retries = 0;

	l = strlen(symname)+1;
	if (l > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	dquot_initialize(dir);

retry:
	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	if (l > sizeof (EXT3_I(inode)->i_data)) {
		inode->i_op = &ext3_symlink_inode_operations;
		ext3_set_aops(inode);
		/*
		 * page_symlink() calls into ext3_prepare/commit_write.
		 * We have a transaction open.  All is sweetness.  It also sets
		 * i_size in generic_commit_write().
		 */
		err = __page_symlink(inode, symname, l, 1);
		if (err) {
			drop_nlink(inode);
			unlock_new_inode(inode);
			ext3_mark_inode_dirty(handle, inode);
			iput (inode);
			goto out_stop;
		}
	} else {
		/* fast symlink: target fits inside the inode itself */
		inode->i_op = &ext3_fast_symlink_inode_operations;
		memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
		inode->i_size = l-1;
	}
	EXT3_I(inode)->i_disksize = inode->i_size;
	err = ext3_add_nondir(handle, dentry, inode);
out_stop:
	ext3_journal_stop(handle);
	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/*
 * ext3_link - create a hard link to an existing inode.
 */
static int ext3_link (struct dentry * old_dentry,
		struct inode * dir, struct dentry *dentry)
{
	handle_t *handle;
	struct inode *inode = old_dentry->d_inode;
	int err, retries = 0;

	if (inode->i_nlink >= EXT3_LINK_MAX)
		return -EMLINK;

	dquot_initialize(dir);

	/*
	 * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
	 * otherwise has the potential to corrupt the orphan inode list.
	 */
	if (inode->i_nlink == 0)
		return -ENOENT;

retry:
	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
					EXT3_INDEX_EXTRA_TRANS_BLOCKS);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		handle->h_sync = 1;

	inode->i_ctime = CURRENT_TIME_SEC;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);

	err = ext3_add_entry(handle, dentry, inode);
	if (!err) {
		ext3_mark_inode_dirty(handle, inode);
		d_instantiate(dentry, inode);
	} else {
		drop_nlink(inode);
		iput(inode);
	}
	ext3_journal_stop(handle);
	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}

/* the inode number of the second entry in a directory block, i.e. ".." */
#define PARENT_INO(buffer) \
	(ext3_next_entry((struct ext3_dir_entry_2 *)(buffer))->inode)

/*
 * Anybody can rename anything with this: the permission checks are left to the
 * higher-level routines.
*/ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir,struct dentry *new_dentry) { handle_t *handle; struct inode * old_inode, * new_inode; struct buffer_head * old_bh, * new_bh, * dir_bh; struct ext3_dir_entry_2 * old_de, * new_de; int retval, flush_file = 0; dquot_initialize(old_dir); dquot_initialize(new_dir); old_bh = new_bh = dir_bh = NULL; /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) dquot_initialize(new_dentry->d_inode); handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) handle->h_sync = 1; old_bh = ext3_find_entry(old_dir, &old_dentry->d_name, &old_de); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. 
Goodbye sticky bit ;-< */ old_inode = old_dentry->d_inode; retval = -ENOENT; if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino) goto end_rename; new_inode = new_dentry->d_inode; new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de); if (new_bh) { if (!new_inode) { brelse (new_bh); new_bh = NULL; } } if (S_ISDIR(old_inode->i_mode)) { if (new_inode) { retval = -ENOTEMPTY; if (!empty_dir (new_inode)) goto end_rename; } retval = -EIO; dir_bh = ext3_bread (handle, old_inode, 0, 0, &retval); if (!dir_bh) goto end_rename; if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino) goto end_rename; retval = -EMLINK; if (!new_inode && new_dir!=old_dir && new_dir->i_nlink >= EXT3_LINK_MAX) goto end_rename; } if (!new_bh) { retval = ext3_add_entry (handle, new_dentry, old_inode); if (retval) goto end_rename; } else { BUFFER_TRACE(new_bh, "get write access"); ext3_journal_get_write_access(handle, new_bh); new_de->inode = cpu_to_le32(old_inode->i_ino); if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb, EXT3_FEATURE_INCOMPAT_FILETYPE)) new_de->file_type = old_de->file_type; new_dir->i_version++; new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, new_dir); BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata"); ext3_journal_dirty_metadata(handle, new_bh); brelse(new_bh); new_bh = NULL; } /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ old_inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, old_inode); /* * ok, that's it */ if (le32_to_cpu(old_de->inode) != old_inode->i_ino || old_de->name_len != old_dentry->d_name.len || strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) || (retval = ext3_delete_entry(handle, old_dir, old_de, old_bh)) == -ENOENT) { /* old_de could have moved from under us during htree split, so * make sure that we are deleting the right entry. 
We might * also be pointing to a stale entry in the unused part of * old_bh so just checking inum and the name isn't enough. */ struct buffer_head *old_bh2; struct ext3_dir_entry_2 *old_de2; old_bh2 = ext3_find_entry(old_dir, &old_dentry->d_name, &old_de2); if (old_bh2) { retval = ext3_delete_entry(handle, old_dir, old_de2, old_bh2); brelse(old_bh2); } } if (retval) { ext3_warning(old_dir->i_sb, "ext3_rename", "Deleting old file (%lu), %d, error=%d", old_dir->i_ino, old_dir->i_nlink, retval); } if (new_inode) { drop_nlink(new_inode); new_inode->i_ctime = CURRENT_TIME_SEC; } old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC; ext3_update_dx_flag(old_dir); if (dir_bh) { BUFFER_TRACE(dir_bh, "get_write_access"); ext3_journal_get_write_access(handle, dir_bh); PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino); BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata"); ext3_journal_dirty_metadata(handle, dir_bh); drop_nlink(old_dir); if (new_inode) { drop_nlink(new_inode); } else { inc_nlink(new_dir); ext3_update_dx_flag(new_dir); ext3_mark_inode_dirty(handle, new_dir); } } ext3_mark_inode_dirty(handle, old_dir); if (new_inode) { ext3_mark_inode_dirty(handle, new_inode); if (!new_inode->i_nlink) ext3_orphan_add(handle, new_inode); if (ext3_should_writeback_data(new_inode)) flush_file = 1; } retval = 0; end_rename: brelse (dir_bh); brelse (old_bh); brelse (new_bh); ext3_journal_stop(handle); if (retval == 0 && flush_file) filemap_flush(old_inode->i_mapping); return retval; } /* * directories can handle most operations... 
 */
const struct inode_operations ext3_dir_inode_operations = {
	.create		= ext3_create,
	.lookup		= ext3_lookup,
	.link		= ext3_link,
	.unlink		= ext3_unlink,
	.symlink	= ext3_symlink,
	.mkdir		= ext3_mkdir,
	.rmdir		= ext3_rmdir,
	.mknod		= ext3_mknod,
	.rename		= ext3_rename,
	.setattr	= ext3_setattr,
#ifdef CONFIG_EXT3_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext3_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.check_acl	= ext3_check_acl,
};

/* Non-directory special inodes get only attribute and xattr/ACL methods -
 * none of the name-space changing operations above. */
const struct inode_operations ext3_special_inode_operations = {
	.setattr	= ext3_setattr,
#ifdef CONFIG_EXT3_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext3_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.check_acl	= ext3_check_acl,
};
gpl-2.0
EmcraftSystems/linux-emcraft
drivers/gpu/drm/ttm/ttm_global.c
1330
3085
/**************************************************************************
 *
 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * One slot per global TTM object type.  The object is allocated lazily on
 * first reference and destroyed when the last reference is dropped;
 * @mutex serializes all access to @object and @refcount.
 */
struct ttm_global_item {
	struct mutex mutex;
	void *object;		/* lazily created singleton, NULL when unused */
	int refcount;		/* number of outstanding references */
};

static struct ttm_global_item glob[TTM_GLOBAL_NUM];

/* Initialize every global item slot.  Called once at module init. */
void ttm_global_init(void)
{
	int i;

	for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
		struct ttm_global_item *item = &glob[i];
		mutex_init(&item->mutex);
		item->object = NULL;
		item->refcount = 0;
	}
}

/* Sanity check at module exit: every reference must have been dropped. */
void ttm_global_release(void)
{
	int i;

	for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
		struct ttm_global_item *item = &glob[i];
		BUG_ON(item->object != NULL);
		BUG_ON(item->refcount != 0);
	}
}

/*
 * Take a reference on the global object described by @ref, creating and
 * initializing it (via ref->init) on the first reference.
 *
 * On success, ref->object points at the shared object and 0 is returned;
 * on failure a negative errno is returned and ref->object must not be
 * used.
 *
 * Fixes relative to the original:
 *  - the kzalloc'd object was leaked when ref->init() failed; it is now
 *    freed on that path;
 *  - item->object was cleared only AFTER mutex_unlock(), racing with a
 *    concurrent ttm_global_item_ref() that may have repopulated the slot;
 *    it is now cleared while the mutex is still held.
 * The unused local 'object' was also removed.
 */
int ttm_global_item_ref(struct ttm_global_reference *ref)
{
	int ret;
	struct ttm_global_item *item = &glob[ref->global_type];

	mutex_lock(&item->mutex);
	if (item->refcount == 0) {
		item->object = kzalloc(ref->size, GFP_KERNEL);
		if (unlikely(item->object == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		ref->object = item->object;
		ret = ref->init(ref);
		if (unlikely(ret != 0))
			goto out_free;
	}
	++item->refcount;
	ref->object = item->object;
	mutex_unlock(&item->mutex);
	return 0;

out_free:
	kfree(item->object);	/* don't leak the half-initialized object */
out_err:
	item->object = NULL;	/* clear while still holding the mutex */
	mutex_unlock(&item->mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_item_ref);

/*
 * Drop a reference previously taken with ttm_global_item_ref().  The
 * object is destroyed (via ref->release) when the last reference goes
 * away.
 */
void ttm_global_item_unref(struct ttm_global_reference *ref)
{
	struct ttm_global_item *item = &glob[ref->global_type];

	mutex_lock(&item->mutex);
	BUG_ON(item->refcount == 0);
	BUG_ON(ref->object != item->object);
	if (--item->refcount == 0) {
		ref->release(ref);
		item->object = NULL;
	}
	mutex_unlock(&item->mutex);
}
EXPORT_SYMBOL(ttm_global_item_unref);
gpl-2.0
me4488/NOPE_Kernel_V2
drivers/net/wan/hdlc_ppp.c
3634
19261
/* * Generic HDLC support routines for Linux * Point-to-point protocol support * * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pkt_sched.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #define DEBUG_CP 0 /* also bytes# to dump */ #define DEBUG_STATE 0 #define DEBUG_HARD_HEADER 0 #define HDLC_ADDR_ALLSTATIONS 0xFF #define HDLC_CTRL_UI 0x03 #define PID_LCP 0xC021 #define PID_IP 0x0021 #define PID_IPCP 0x8021 #define PID_IPV6 0x0057 #define PID_IPV6CP 0x8057 enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT}; enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ, CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY, LCP_DISC_REQ, CP_CODES}; #if DEBUG_CP static const char *const code_names[CP_CODES] = { "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq", "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard" }; static char debug_buffer[64 + 3 * DEBUG_CP]; #endif enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5}; struct hdlc_header { u8 address; u8 control; __be16 protocol; }; struct cp_header { u8 code; u8 id; __be16 len; }; struct proto { struct net_device *dev; struct timer_list timer; unsigned long timeout; u16 pid; /* protocol ID */ u8 state; u8 cr_id; /* ID of last Configuration-Request */ u8 restart_counter; }; struct ppp { struct proto protos[IDX_COUNT]; spinlock_t lock; unsigned long last_pong; unsigned int req_timeout, cr_retries, term_retries; unsigned int keepalive_interval, keepalive_timeout; u8 seq; /* local sequence number for 
requests */ u8 echo_id; /* ID of last Echo-Request (LCP) */ }; enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED, STATES, STATE_MASK = 0xF}; enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA, RUC, RXJ_GOOD, RXJ_BAD, EVENTS}; enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100, SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000}; #if DEBUG_STATE static const char *const state_names[STATES] = { "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent", "Opened" }; static const char *const event_names[EVENTS] = { "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN", "RTR", "RTA", "RUC", "RXJ+", "RXJ-" }; #endif static struct sk_buff_head tx_queue; /* used when holding the spin lock */ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr); static inline struct ppp* get_ppp(struct net_device *dev) { return (struct ppp *)dev_to_hdlc(dev)->state; } static inline struct proto* get_proto(struct net_device *dev, u16 pid) { struct ppp *ppp = get_ppp(dev); switch (pid) { case PID_LCP: return &ppp->protos[IDX_LCP]; case PID_IPCP: return &ppp->protos[IDX_IPCP]; case PID_IPV6CP: return &ppp->protos[IDX_IPV6CP]; default: return NULL; } } static inline const char* proto_name(u16 pid) { switch (pid) { case PID_LCP: return "LCP"; case PID_IPCP: return "IPCP"; case PID_IPV6CP: return "IPV6CP"; default: return NULL; } } static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) { struct hdlc_header *data = (struct hdlc_header*)skb->data; if (skb->len < sizeof(struct hdlc_header)) return htons(ETH_P_HDLC); if (data->address != HDLC_ADDR_ALLSTATIONS || data->control != HDLC_CTRL_UI) return htons(ETH_P_HDLC); switch (data->protocol) { case cpu_to_be16(PID_IP): skb_pull(skb, sizeof(struct hdlc_header)); return htons(ETH_P_IP); case cpu_to_be16(PID_IPV6): skb_pull(skb, sizeof(struct hdlc_header)); return htons(ETH_P_IPV6); default: return htons(ETH_P_HDLC); } } static int 
ppp_hard_header(struct sk_buff *skb, struct net_device *dev, u16 type, const void *daddr, const void *saddr, unsigned int len) { struct hdlc_header *data; #if DEBUG_HARD_HEADER printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name); #endif skb_push(skb, sizeof(struct hdlc_header)); data = (struct hdlc_header*)skb->data; data->address = HDLC_ADDR_ALLSTATIONS; data->control = HDLC_CTRL_UI; switch (type) { case ETH_P_IP: data->protocol = htons(PID_IP); break; case ETH_P_IPV6: data->protocol = htons(PID_IPV6); break; case PID_LCP: case PID_IPCP: case PID_IPV6CP: data->protocol = htons(type); break; default: /* unknown protocol */ data->protocol = 0; } return sizeof(struct hdlc_header); } static void ppp_tx_flush(void) { struct sk_buff *skb; while ((skb = skb_dequeue(&tx_queue)) != NULL) dev_queue_xmit(skb); } static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, u8 id, unsigned int len, const void *data) { struct sk_buff *skb; struct cp_header *cp; unsigned int magic_len = 0; static u32 magic; #if DEBUG_CP int i; char *ptr; #endif if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY)) magic_len = sizeof(magic); skb = dev_alloc_skb(sizeof(struct hdlc_header) + sizeof(struct cp_header) + magic_len + len); if (!skb) { printk(KERN_WARNING "%s: out of memory in ppp_tx_cp()\n", dev->name); return; } skb_reserve(skb, sizeof(struct hdlc_header)); cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header)); cp->code = code; cp->id = id; cp->len = htons(sizeof(struct cp_header) + magic_len + len); if (magic_len) memcpy(skb_put(skb, magic_len), &magic, magic_len); if (len) memcpy(skb_put(skb, len), data, len); #if DEBUG_CP BUG_ON(code >= CP_CODES); ptr = debug_buffer; *ptr = '\x0'; for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) { sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]); ptr += strlen(ptr); } printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name, proto_name(pid), code_names[code], id, 
debug_buffer); #endif ppp_hard_header(skb, dev, pid, NULL, NULL, 0); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; skb_reset_network_header(skb); skb_queue_tail(&tx_queue, skb); } /* State transition table (compare STD-51) Events Actions TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count TO- = Timeout with counter expired zrc = Zero-Restart-Count RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request RCR- = Receive-Configure-Request (Bad) RCA = Receive-Configure-Ack sca = Send-Configure-Ack RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej RTR = Receive-Terminate-Request str = Send-Terminate-Request RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack RUC = Receive-Unknown-Code scj = Send-Code-Reject RXJ+ = Receive-Code-Reject (permitted) or Receive-Protocol-Reject RXJ- = Receive-Code-Reject (catastrophic) or Receive-Protocol-Reject */ static int cp_table[EVENTS][STATES] = { /* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED 0 1 2 3 4 5 6 */ {IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */ { INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */ { INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */ { INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */ { STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */ { STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* RCR- */ { STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */ { STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */ { STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */ { 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */ { SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */ { 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */ { 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */ }; /* SCA: RCR+ must supply id, len and data SCN: RCR- must supply code, id, len and data STA: RTR must supply id SCJ: RUC must supply CP packet len and data */ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code, u8 id, 
unsigned int len, const void *data) { int old_state, action; struct ppp *ppp = get_ppp(dev); struct proto *proto = get_proto(dev, pid); old_state = proto->state; BUG_ON(old_state >= STATES); BUG_ON(event >= EVENTS); #if DEBUG_STATE printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name, proto_name(pid), event_names[event], state_names[proto->state]); #endif action = cp_table[event][old_state]; proto->state = action & STATE_MASK; if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */ mod_timer(&proto->timer, proto->timeout = jiffies + ppp->req_timeout * HZ); if (action & ZRC) proto->restart_counter = 0; if (action & IRC) proto->restart_counter = (proto->state == STOPPING) ? ppp->term_retries : ppp->cr_retries; if (action & SCR) /* send Configure-Request */ ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq, 0, NULL); if (action & SCA) /* send Configure-Ack */ ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data); if (action & SCN) /* send Configure-Nak/Reject */ ppp_tx_cp(dev, pid, code, id, len, data); if (action & STR) /* send Terminate-Request */ ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL); if (action & STA) /* send Terminate-Ack */ ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL); if (action & SCJ) /* send Code-Reject */ ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data); if (old_state != OPENED && proto->state == OPENED) { printk(KERN_INFO "%s: %s up\n", dev->name, proto_name(pid)); if (pid == PID_LCP) { netif_dormant_off(dev); ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL); ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL); ppp->last_pong = jiffies; mod_timer(&proto->timer, proto->timeout = jiffies + ppp->keepalive_interval * HZ); } } if (old_state == OPENED && proto->state != OPENED) { printk(KERN_INFO "%s: %s down\n", dev->name, proto_name(pid)); if (pid == PID_LCP) { netif_dormant_on(dev); ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL); ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL); } } if (old_state != 
CLOSED && proto->state == CLOSED) del_timer(&proto->timer); #if DEBUG_STATE printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name, proto_name(pid), event_names[event], state_names[proto->state]); #endif } static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, unsigned int req_len, const u8 *data) { static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 }; const u8 *opt; u8 *out; unsigned int len = req_len, nak_len = 0, rej_len = 0; if (!(out = kmalloc(len, GFP_ATOMIC))) { dev->stats.rx_dropped++; return; /* out of memory, ignore CR packet */ } for (opt = data; len; len -= opt[1], opt += opt[1]) { if (len < 2 || len < opt[1]) { dev->stats.rx_errors++; kfree(out); return; /* bad packet, drop silently */ } if (pid == PID_LCP) switch (opt[0]) { case LCP_OPTION_MRU: continue; /* MRU always OK and > 1500 bytes? */ case LCP_OPTION_ACCM: /* async control character map */ if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; if (!rej_len) { /* NAK it */ memcpy(out + nak_len, valid_accm, sizeof(valid_accm)); nak_len += sizeof(valid_accm); continue; } break; case LCP_OPTION_MAGIC: if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ continue; } /* reject this option */ memcpy(out + rej_len, opt, opt[1]); rej_len += opt[1]; } if (rej_len) ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out); else if (nak_len) ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out); else ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); } static int ppp_rx(struct sk_buff *skb) { struct hdlc_header *hdr = (struct hdlc_header*)skb->data; struct net_device *dev = skb->dev; struct ppp *ppp = get_ppp(dev); struct proto *proto; struct cp_header *cp; unsigned long flags; unsigned int len; u16 pid; #if DEBUG_CP int i; char *ptr; #endif spin_lock_irqsave(&ppp->lock, flags); /* Check HDLC header */ if (skb->len < sizeof(struct hdlc_header)) goto rx_error; cp = (struct 
cp_header*)skb_pull(skb, sizeof(struct hdlc_header)); if (hdr->address != HDLC_ADDR_ALLSTATIONS || hdr->control != HDLC_CTRL_UI) goto rx_error; pid = ntohs(hdr->protocol); proto = get_proto(dev, pid); if (!proto) { if (ppp->protos[IDX_LCP].state == OPENED) ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ, ++ppp->seq, skb->len + 2, &hdr->protocol); goto rx_error; } len = ntohs(cp->len); if (len < sizeof(struct cp_header) /* no complete CP header? */ || skb->len < len /* truncated packet? */) goto rx_error; skb_pull(skb, sizeof(struct cp_header)); len -= sizeof(struct cp_header); /* HDLC and CP headers stripped from skb */ #if DEBUG_CP if (cp->code < CP_CODES) sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code], cp->id); else sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id); ptr = debug_buffer + strlen(debug_buffer); for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) { sprintf(ptr, " %02X", skb->data[i]); ptr += strlen(ptr); } printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid), debug_buffer); #endif /* LCP only */ if (pid == PID_LCP) switch (cp->code) { case LCP_PROTO_REJ: pid = ntohs(*(__be16*)skb->data); if (pid == PID_LCP || pid == PID_IPCP || pid == PID_IPV6CP) ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL); goto out; case LCP_ECHO_REQ: /* send Echo-Reply */ if (len >= 4 && proto->state == OPENED) ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY, cp->id, len - 4, skb->data + 4); goto out; case LCP_ECHO_REPLY: if (cp->id == ppp->echo_id) ppp->last_pong = jiffies; goto out; case LCP_DISC_REQ: /* discard */ goto out; } /* LCP, IPCP and IPV6CP */ switch (cp->code) { case CP_CONF_REQ: ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data); goto out; case CP_CONF_ACK: if (cp->id == proto->cr_id) ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL); goto out; case CP_CONF_REJ: case CP_CONF_NAK: if (cp->id == proto->cr_id) ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL); goto out; case CP_TERM_REQ: ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL); goto out; case CP_TERM_ACK: 
ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL); goto out; case CP_CODE_REJ: ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL); goto out; default: len += sizeof(struct cp_header); if (len > dev->mtu) len = dev->mtu; ppp_cp_event(dev, pid, RUC, 0, 0, len, cp); goto out; } goto out; rx_error: dev->stats.rx_errors++; out: spin_unlock_irqrestore(&ppp->lock, flags); dev_kfree_skb_any(skb); ppp_tx_flush(); return NET_RX_DROP; } static void ppp_timer(unsigned long arg) { struct proto *proto = (struct proto *)arg; struct ppp *ppp = get_ppp(proto->dev); unsigned long flags; spin_lock_irqsave(&ppp->lock, flags); switch (proto->state) { case STOPPING: case REQ_SENT: case ACK_RECV: case ACK_SENT: if (proto->restart_counter) { ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; } else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; case OPENED: if (proto->pid != PID_LCP) break; if (time_after(jiffies, ppp->last_pong + ppp->keepalive_timeout * HZ)) { printk(KERN_INFO "%s: Link down\n", proto->dev->name); ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL); ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL); } else { /* send keep-alive packet */ ppp->echo_id = ++ppp->seq; ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ, ppp->echo_id, 0, NULL); proto->timer.expires = jiffies + ppp->keepalive_interval * HZ; add_timer(&proto->timer); } break; } spin_unlock_irqrestore(&ppp->lock, flags); ppp_tx_flush(); } static void ppp_start(struct net_device *dev) { struct ppp *ppp = get_ppp(dev); int i; for (i = 0; i < IDX_COUNT; i++) { struct proto *proto = &ppp->protos[i]; proto->dev = dev; init_timer(&proto->timer); proto->timer.function = ppp_timer; proto->timer.data = (unsigned long)proto; proto->state = CLOSED; } ppp->protos[IDX_LCP].pid = PID_LCP; ppp->protos[IDX_IPCP].pid = PID_IPCP; ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP; ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL); } static void ppp_stop(struct net_device *dev) { 
ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);	/* signal STOP to the LCP state machine */
}

/* Flush any control frames still sitting in tx_queue before detach. */
static void ppp_close(struct net_device *dev)
{
	ppp_tx_flush();
}

/* Protocol descriptor registered with the generic HDLC layer. */
static struct hdlc_proto proto = {
	.start		= ppp_start,
	.stop		= ppp_stop,
	.close		= ppp_close,
	.type_trans	= ppp_type_trans,
	.ioctl		= ppp_ioctl,
	.netif_rx	= ppp_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops ppp_header_ops = {
	.create = ppp_hard_header,
};

/*
 * ifreq interface: report the attached protocol (IF_GET_PROTO) or attach
 * PPP to the device (IF_PROTO_PPP).  Attaching programs the hardware for
 * NRZ encoding with CRC16 framing, allocates the per-device struct ppp,
 * installs default FSM timing/retry parameters and leaves the interface
 * dormant until LCP opens.  Returns 0 or a negative errno.
 */
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ppp *ppp;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;	/* some other protocol is attached */
		ifr->ifr_settings.type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;	/* must be down to (re)attach */

		/* no settable parameters */

		result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct ppp));
		if (result)
			return result;

		/* Default FSM parameters: timeouts in seconds, retry counts. */
		ppp = get_ppp(dev);
		spin_lock_init(&ppp->lock);
		ppp->req_timeout = 2;
		ppp->cr_retries = 10;
		ppp->term_retries = 2;
		ppp->keepalive_interval = 10;
		ppp->keepalive_timeout = 60;

		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->header_ops = &ppp_header_ops;
		dev->type = ARPHRD_PPP;
		netif_dormant_on(dev);	/* link reported down until LCP opens */
		return 0;
	}

	return -EINVAL;
}

static int __init mod_init(void)
{
	skb_queue_head_init(&tx_queue);
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
gpl-2.0
philozheng/kernel-msm
arch/arm/mach-msm/board-mahimahi-panel.c
3634
22498
/* linux/arch/arm/mach-msm/board-mahimahi-panel.c * * Copyright (c) 2009 Google Inc. * Author: Dima Zavin <dima@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <asm/io.h> #include <asm/mach-types.h> #include <mach/msm_fb.h> #include <mach/msm_iomap.h> #include <mach/vreg.h> #include <mach/proc_comm.h> #include "board-mahimahi.h" #include "devices.h" #define SPI_CONFIG (0x00000000) #define SPI_IO_CONTROL (0x00000004) #define SPI_OPERATIONAL (0x00000030) #define SPI_ERROR_FLAGS_EN (0x00000038) #define SPI_ERROR_FLAGS (0x00000038) #define SPI_OUTPUT_FIFO (0x00000100) static void __iomem *spi_base; static struct clk *spi_clk ; static struct vreg *vreg_lcm_rftx_2v6; static struct vreg *vreg_lcm_aux_2v6; static int qspi_send(uint32_t id, uint8_t data) { uint32_t err; /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { if ((err = readl(spi_base + SPI_ERROR_FLAGS))) { pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, err); return -EIO; } } writel((0x7000 | (id << 9) | data) << 16, spi_base + SPI_OUTPUT_FIFO); udelay(100); return 0; } static int qspi_send_9bit(uint32_t id, uint8_t data) { uint32_t err; while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { err = readl(spi_base + SPI_ERROR_FLAGS); if (err) { pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, err); return -EIO; } } 
writel(((id << 8) | data) << 23, spi_base + SPI_OUTPUT_FIFO); udelay(100); return 0; } static int lcm_writeb(uint8_t reg, uint8_t val) { qspi_send(0x0, reg); qspi_send(0x1, val); return 0; } static int lcm_writew(uint8_t reg, uint16_t val) { qspi_send(0x0, reg); qspi_send(0x1, val >> 8); qspi_send(0x1, val & 0xff); return 0; } static struct resource resources_msm_fb[] = { { .start = MSM_FB_BASE, .end = MSM_FB_BASE + MSM_FB_SIZE - 1, .flags = IORESOURCE_MEM, }, }; struct lcm_tbl { uint8_t reg; uint8_t val; }; static struct lcm_tbl samsung_oled_rgb565_init_table[] = { { 0x31, 0x08 }, { 0x32, 0x14 }, { 0x30, 0x2 }, { 0x27, 0x1 }, { 0x12, 0x8 }, { 0x13, 0x8 }, { 0x15, 0x0 }, { 0x16, 0x02 }, { 0x39, 0x24 }, { 0x17, 0x22 }, { 0x18, 0x33 }, { 0x19, 0x3 }, { 0x1A, 0x1 }, { 0x22, 0xA4 }, { 0x23, 0x0 }, { 0x26, 0xA0 }, }; static struct lcm_tbl samsung_oled_rgb666_init_table[] = { { 0x31, 0x08 }, { 0x32, 0x14 }, { 0x30, 0x2 }, { 0x27, 0x1 }, { 0x12, 0x8 }, { 0x13, 0x8 }, { 0x15, 0x0 }, { 0x16, 0x01 }, { 0x39, 0x24 }, { 0x17, 0x22 }, { 0x18, 0x33 }, { 0x19, 0x3 }, { 0x1A, 0x1 }, { 0x22, 0xA4 }, { 0x23, 0x0 }, { 0x26, 0xA0 }, }; static struct lcm_tbl *init_tablep = samsung_oled_rgb565_init_table; static size_t init_table_sz = ARRAY_SIZE(samsung_oled_rgb565_init_table); #define OLED_GAMMA_TABLE_SIZE (7 * 3) static struct lcm_tbl samsung_oled_gamma_table[][OLED_GAMMA_TABLE_SIZE] = { /* level 10 */ { /* Gamma-R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x3f }, { 0x43, 0x35 }, { 0x44, 0x30 }, { 0x45, 0x2c }, { 0x46, 0x13 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x0 }, { 0x54, 0x27 }, { 0x55, 0x2b }, { 0x56, 0x12 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x3f }, { 0x63, 0x34 }, { 0x64, 0x2f }, { 0x65, 0x2b }, { 0x66, 0x1b }, }, /* level 40 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x3e }, { 0x43, 0x2e }, { 0x44, 0x2d }, { 0x45, 0x28 }, { 0x46, 0x21 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 
0x21 }, { 0x54, 0x2a }, { 0x55, 0x28 }, { 0x56, 0x20 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x3e }, { 0x63, 0x2d }, { 0x64, 0x2b }, { 0x65, 0x26 }, { 0x66, 0x2d }, }, /* level 70 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x35 }, { 0x43, 0x2c }, { 0x44, 0x2b }, { 0x45, 0x26 }, { 0x46, 0x29 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x25 }, { 0x54, 0x29 }, { 0x55, 0x26 }, { 0x56, 0x28 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x34 }, { 0x63, 0x2b }, { 0x64, 0x2a }, { 0x65, 0x23 }, { 0x66, 0x37 }, }, /* level 100 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x30 }, { 0x43, 0x2a }, { 0x44, 0x2b }, { 0x45, 0x24 }, { 0x46, 0x2f }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x0 }, { 0x53, 0x25 }, { 0x54, 0x29 }, { 0x55, 0x24 }, { 0x56, 0x2e }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x2f }, { 0x63, 0x29 }, { 0x64, 0x29 }, { 0x65, 0x21 }, { 0x66, 0x3f }, }, /* level 130 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x2e }, { 0x43, 0x29 }, { 0x44, 0x2a }, { 0x45, 0x23 }, { 0x46, 0x34 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0xa }, { 0x53, 0x25 }, { 0x54, 0x28 }, { 0x55, 0x23 }, { 0x56, 0x33 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x2d }, { 0x63, 0x28 }, { 0x64, 0x27 }, { 0x65, 0x20 }, { 0x66, 0x46 }, }, /* level 160 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x2b }, { 0x43, 0x29 }, { 0x44, 0x28 }, { 0x45, 0x23 }, { 0x46, 0x38 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0xb }, { 0x53, 0x25 }, { 0x54, 0x27 }, { 0x55, 0x23 }, { 0x56, 0x37 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x29 }, { 0x63, 0x28 }, { 0x64, 0x25 }, { 0x65, 0x20 }, { 0x66, 0x4b }, }, /* level 190 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x29 }, { 0x43, 0x29 }, { 0x44, 0x27 }, { 0x45, 0x22 }, { 0x46, 0x3c }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x10 }, { 0x53, 0x26 }, { 
0x54, 0x26 }, { 0x55, 0x22 }, { 0x56, 0x3b }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x28 }, { 0x63, 0x28 }, { 0x64, 0x24 }, { 0x65, 0x1f }, { 0x66, 0x50 }, }, /* level 220 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x28 }, { 0x43, 0x28 }, { 0x44, 0x28 }, { 0x45, 0x20 }, { 0x46, 0x40 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x11 }, { 0x53, 0x25 }, { 0x54, 0x27 }, { 0x55, 0x20 }, { 0x56, 0x3f }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x27 }, { 0x63, 0x26 }, { 0x64, 0x26 }, { 0x65, 0x1c }, { 0x66, 0x56 }, }, /* level 250 */ { /* Gamma -R */ { 0x40, 0x0 }, { 0x41, 0x3f }, { 0x42, 0x2a }, { 0x43, 0x27 }, { 0x44, 0x27 }, { 0x45, 0x1f }, { 0x46, 0x44 }, /* Gamma -G */ { 0x50, 0x0 }, { 0x51, 0x0 }, { 0x52, 0x17 }, { 0x53, 0x24 }, { 0x54, 0x26 }, { 0x55, 0x1f }, { 0x56, 0x43 }, /* Gamma -B */ { 0x60, 0x0 }, { 0x61, 0x3f }, { 0x62, 0x2a }, { 0x63, 0x25 }, { 0x64, 0x24 }, { 0x65, 0x1b }, { 0x66, 0x5c }, }, }; #define SAMSUNG_OLED_NUM_LEVELS ARRAY_SIZE(samsung_oled_gamma_table) #define SAMSUNG_OLED_MIN_VAL 10 #define SAMSUNG_OLED_MAX_VAL 250 #define SAMSUNG_OLED_DEFAULT_VAL (SAMSUNG_OLED_MIN_VAL + \ (SAMSUNG_OLED_MAX_VAL - \ SAMSUNG_OLED_MIN_VAL) / 2) #define SAMSUNG_OLED_LEVEL_STEP ((SAMSUNG_OLED_MAX_VAL - \ SAMSUNG_OLED_MIN_VAL) / \ (SAMSUNG_OLED_NUM_LEVELS - 1)) #define SONY_TFT_DEF_USER_VAL 102 #define SONY_TFT_MIN_USER_VAL 30 #define SONY_TFT_MAX_USER_VAL 255 #define SONY_TFT_DEF_PANEL_VAL 155 #define SONY_TFT_MIN_PANEL_VAL 26 #define SONY_TFT_MAX_PANEL_VAL 255 static DEFINE_MUTEX(panel_lock); static struct work_struct brightness_delayed_work; static DEFINE_SPINLOCK(brightness_lock); static uint8_t new_val = SAMSUNG_OLED_DEFAULT_VAL; static uint8_t last_val = SAMSUNG_OLED_DEFAULT_VAL; static uint8_t table_sel_vals[] = { 0x43, 0x34 }; static int table_sel_idx = 0; static uint8_t tft_panel_on; static void gamma_table_bank_select(void) { lcm_writeb(0x39, table_sel_vals[table_sel_idx]); table_sel_idx ^= 1; } static 
void samsung_oled_set_gamma_val(int val) { int i; int level; int frac; val = clamp(val, SAMSUNG_OLED_MIN_VAL, SAMSUNG_OLED_MAX_VAL); val = (val / 2) * 2; level = (val - SAMSUNG_OLED_MIN_VAL) / SAMSUNG_OLED_LEVEL_STEP; frac = (val - SAMSUNG_OLED_MIN_VAL) % SAMSUNG_OLED_LEVEL_STEP; clk_enable(spi_clk); for (i = 0; i < OLED_GAMMA_TABLE_SIZE; ++i) { unsigned int v1; unsigned int v2 = 0; u8 v; if (frac == 0) { v = samsung_oled_gamma_table[level][i].val; } else { v1 = samsung_oled_gamma_table[level][i].val; v2 = samsung_oled_gamma_table[level+1][i].val; v = (v1 * (SAMSUNG_OLED_LEVEL_STEP - frac) + v2 * frac) / SAMSUNG_OLED_LEVEL_STEP; } lcm_writeb(samsung_oled_gamma_table[level][i].reg, v); } gamma_table_bank_select(); clk_disable(spi_clk); last_val = val; } static int samsung_oled_panel_init(struct msm_lcdc_panel_ops *ops) { pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); clk_enable(spi_clk); /* Set the gamma write target to 4, leave the current gamma set at 2 */ lcm_writeb(0x39, 0x24); clk_disable(spi_clk); mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } static int samsung_oled_panel_unblank(struct msm_lcdc_panel_ops *ops) { int i; pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); udelay(50); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); udelay(20); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); msleep(20); clk_enable(spi_clk); for (i = 0; i < init_table_sz; i++) lcm_writeb(init_tablep[i].reg, init_tablep[i].val); lcm_writew(0xef, 0xd0e8); lcm_writeb(0x1d, 0xa0); table_sel_idx = 0; gamma_table_bank_select(); samsung_oled_set_gamma_val(last_val); msleep(250); lcm_writeb(0x14, 0x03); clk_disable(spi_clk); mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } static int samsung_oled_panel_blank(struct msm_lcdc_panel_ops *ops) { pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); clk_enable(spi_clk); lcm_writeb(0x14, 0x0); mdelay(1); lcm_writeb(0x1d, 0xa1); 
clk_disable(spi_clk); msleep(200); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } struct lcm_cmd { int reg; uint32_t val; unsigned delay; }; #define LCM_GPIO_CFG(gpio, func, str) \ PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, str) static uint32_t sony_tft_display_on_gpio_table[] = { LCM_GPIO_CFG(MAHIMAHI_LCD_R1, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R2, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R3, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R4, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R5, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G0, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G1, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G2, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G3, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G4, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G5, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B1, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B2, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B3, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B4, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B5, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_PCLK, 1, GPIO_4MA), LCM_GPIO_CFG(MAHIMAHI_LCD_VSYNC, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_HSYNC, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_DE, 1, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CLK, 1, GPIO_4MA), LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_DO, 1, GPIO_4MA), LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CSz, 1, GPIO_4MA), }; static uint32_t sony_tft_display_off_gpio_table[] = { LCM_GPIO_CFG(MAHIMAHI_LCD_R1, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R2, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R3, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R4, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_R5, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G0, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G1, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G2, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G3, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G4, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_G5, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B1, 0, GPIO_8MA), 
LCM_GPIO_CFG(MAHIMAHI_LCD_B2, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B3, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B4, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_B5, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_PCLK, 0, GPIO_4MA), LCM_GPIO_CFG(MAHIMAHI_LCD_VSYNC, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_HSYNC, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_DE, 0, GPIO_8MA), LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CLK, 0, GPIO_4MA), LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_DO, 0, GPIO_4MA), LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CSz, 0, GPIO_4MA), }; #undef LCM_GPIO_CFG #define SONY_TFT_DEF_PANEL_DELTA \ (SONY_TFT_DEF_PANEL_VAL - SONY_TFT_MIN_PANEL_VAL) #define SONY_TFT_DEF_USER_DELTA \ (SONY_TFT_DEF_USER_VAL - SONY_TFT_MIN_USER_VAL) static void sony_tft_set_pwm_val(int val) { pr_info("%s: %d\n", __func__, val); last_val = val; if (!tft_panel_on) return; if (val <= SONY_TFT_DEF_USER_VAL) { if (val <= SONY_TFT_MIN_USER_VAL) val = SONY_TFT_MIN_PANEL_VAL; else val = SONY_TFT_DEF_PANEL_DELTA * (val - SONY_TFT_MIN_USER_VAL) / SONY_TFT_DEF_USER_DELTA + SONY_TFT_MIN_PANEL_VAL; } else val = (SONY_TFT_MAX_PANEL_VAL - SONY_TFT_DEF_PANEL_VAL) * (val - SONY_TFT_DEF_USER_VAL) / (SONY_TFT_MAX_USER_VAL - SONY_TFT_DEF_USER_VAL) + SONY_TFT_DEF_PANEL_VAL; clk_enable(spi_clk); qspi_send_9bit(0x0, 0x51); qspi_send_9bit(0x1, val); qspi_send_9bit(0x0, 0x53); qspi_send_9bit(0x1, 0x24); clk_disable(spi_clk); } #undef SONY_TFT_DEF_PANEL_DELTA #undef SONY_TFT_DEF_USER_DELTA static void sony_tft_panel_config_gpio_table(uint32_t *table, int len) { int n; unsigned id; for (n = 0; n < len; n++) { id = table[n]; msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); } } static int sony_tft_panel_power(int on) { unsigned id, on_off; if (on) { on_off = 0; vreg_enable(vreg_lcm_aux_2v6); vreg_enable(vreg_lcm_rftx_2v6); id = PM_VREG_PDOWN_AUX_ID; msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); id = PM_VREG_PDOWN_RFTX_ID; msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); mdelay(10); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); mdelay(10); 
gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); udelay(500); gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); mdelay(10); sony_tft_panel_config_gpio_table( sony_tft_display_on_gpio_table, ARRAY_SIZE(sony_tft_display_on_gpio_table)); } else { on_off = 1; gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); mdelay(120); vreg_disable(vreg_lcm_rftx_2v6); vreg_disable(vreg_lcm_aux_2v6); id = PM_VREG_PDOWN_RFTX_ID; msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); id = PM_VREG_PDOWN_AUX_ID; msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); sony_tft_panel_config_gpio_table( sony_tft_display_off_gpio_table, ARRAY_SIZE(sony_tft_display_off_gpio_table)); } return 0; } static int sony_tft_panel_init(struct msm_lcdc_panel_ops *ops) { return 0; } static int sony_tft_panel_unblank(struct msm_lcdc_panel_ops *ops) { pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); if (tft_panel_on) { pr_info("%s: -() already unblanked\n", __func__); goto done; } sony_tft_panel_power(1); msleep(45); clk_enable(spi_clk); qspi_send_9bit(0x0, 0x11); msleep(5); qspi_send_9bit(0x0, 0x3a); qspi_send_9bit(0x1, 0x05); msleep(100); qspi_send_9bit(0x0, 0x29); /* unlock register page for pwm setting */ qspi_send_9bit(0x0, 0xf0); qspi_send_9bit(0x1, 0x5a); qspi_send_9bit(0x1, 0x5a); qspi_send_9bit(0x0, 0xf1); qspi_send_9bit(0x1, 0x5a); qspi_send_9bit(0x1, 0x5a); qspi_send_9bit(0x0, 0xd0); qspi_send_9bit(0x1, 0x5a); qspi_send_9bit(0x1, 0x5a); qspi_send_9bit(0x0, 0xc2); qspi_send_9bit(0x1, 0x53); qspi_send_9bit(0x1, 0x12); clk_disable(spi_clk); msleep(100); tft_panel_on = 1; sony_tft_set_pwm_val(last_val); pr_info("%s: -()\n", __func__); done: mutex_unlock(&panel_lock); return 0; } static int sony_tft_panel_blank(struct msm_lcdc_panel_ops *ops) { pr_info("%s: +()\n", __func__); mutex_lock(&panel_lock); clk_enable(spi_clk); qspi_send_9bit(0x0, 0x28); qspi_send_9bit(0x0, 0x10); clk_disable(spi_clk); msleep(40); sony_tft_panel_power(0); tft_panel_on = 0; mutex_unlock(&panel_lock); pr_info("%s: -()\n", __func__); return 0; } 
static struct msm_lcdc_panel_ops mahimahi_lcdc_amoled_panel_ops = { .init = samsung_oled_panel_init, .blank = samsung_oled_panel_blank, .unblank = samsung_oled_panel_unblank, }; static struct msm_lcdc_panel_ops mahimahi_lcdc_tft_panel_ops = { .init = sony_tft_panel_init, .blank = sony_tft_panel_blank, .unblank = sony_tft_panel_unblank, }; static struct msm_lcdc_timing mahimahi_lcdc_amoled_timing = { .clk_rate = 24576000, .hsync_pulse_width = 4, .hsync_back_porch = 8, .hsync_front_porch = 8, .hsync_skew = 0, .vsync_pulse_width = 2, .vsync_back_porch = 8, .vsync_front_porch = 8, .vsync_act_low = 1, .hsync_act_low = 1, .den_act_low = 1, }; static struct msm_lcdc_timing mahimahi_lcdc_tft_timing = { .clk_rate = 24576000, .hsync_pulse_width = 2, .hsync_back_porch = 20, .hsync_front_porch = 20, .hsync_skew = 0, .vsync_pulse_width = 2, .vsync_back_porch = 6, .vsync_front_porch = 4, .vsync_act_low = 1, .hsync_act_low = 1, .den_act_low = 0, }; static struct msm_fb_data mahimahi_lcdc_fb_data = { .xres = 480, .yres = 800, .width = 48, .height = 80, .output_format = MSM_MDP_OUT_IF_FMT_RGB565, }; static struct msm_lcdc_platform_data mahimahi_lcdc_amoled_platform_data = { .panel_ops = &mahimahi_lcdc_amoled_panel_ops, .timing = &mahimahi_lcdc_amoled_timing, .fb_id = 0, .fb_data = &mahimahi_lcdc_fb_data, .fb_resource = &resources_msm_fb[0], }; static struct msm_lcdc_platform_data mahimahi_lcdc_tft_platform_data = { .panel_ops = &mahimahi_lcdc_tft_panel_ops, .timing = &mahimahi_lcdc_tft_timing, .fb_id = 0, .fb_data = &mahimahi_lcdc_fb_data, .fb_resource = &resources_msm_fb[0], }; static struct platform_device mahimahi_lcdc_amoled_device = { .name = "msm_mdp_lcdc", .id = -1, .dev = { .platform_data = &mahimahi_lcdc_amoled_platform_data, }, }; static struct platform_device mahimahi_lcdc_tft_device = { .name = "msm_mdp_lcdc", .id = -1, .dev = { .platform_data = &mahimahi_lcdc_tft_platform_data, }, }; static int mahimahi_init_spi_hack(void) { int ret; spi_base = ioremap(MSM_SPI_PHYS, 
MSM_SPI_SIZE); if (!spi_base) return -1; spi_clk = clk_get(&msm_device_spi.dev, "spi_clk"); if (IS_ERR(spi_clk)) { pr_err("%s: unable to get spi_clk\n", __func__); ret = PTR_ERR(spi_clk); goto err_clk_get; } clk_enable(spi_clk); printk("spi: SPI_CONFIG=%x\n", readl(spi_base + SPI_CONFIG)); printk("spi: SPI_IO_CONTROL=%x\n", readl(spi_base + SPI_IO_CONTROL)); printk("spi: SPI_OPERATIONAL=%x\n", readl(spi_base + SPI_OPERATIONAL)); printk("spi: SPI_ERROR_FLAGS_EN=%x\n", readl(spi_base + SPI_ERROR_FLAGS_EN)); printk("spi: SPI_ERROR_FLAGS=%x\n", readl(spi_base + SPI_ERROR_FLAGS)); printk("-%s()\n", __FUNCTION__); clk_disable(spi_clk); return 0; err_clk_get: iounmap(spi_base); return ret; } static void mahimahi_brightness_set(struct led_classdev *led_cdev, enum led_brightness val) { unsigned long flags; led_cdev->brightness = val; spin_lock_irqsave(&brightness_lock, flags); new_val = val; spin_unlock_irqrestore(&brightness_lock, flags); schedule_work(&brightness_delayed_work); } static void mahimahi_brightness_amoled_set_work(struct work_struct *work_ptr) { unsigned long flags; uint8_t val; spin_lock_irqsave(&brightness_lock, flags); val = new_val; spin_unlock_irqrestore(&brightness_lock, flags); mutex_lock(&panel_lock); samsung_oled_set_gamma_val(val); mutex_unlock(&panel_lock); } static void mahimahi_brightness_tft_set_work(struct work_struct *work_ptr) { unsigned long flags; uint8_t val; spin_lock_irqsave(&brightness_lock, flags); val = new_val; spin_unlock_irqrestore(&brightness_lock, flags); mutex_lock(&panel_lock); sony_tft_set_pwm_val(val); mutex_unlock(&panel_lock); } static struct led_classdev mahimahi_brightness_led = { .name = "lcd-backlight", .brightness = LED_FULL, .brightness_set = mahimahi_brightness_set, }; int __init mahimahi_init_panel(void) { int ret; if (!machine_is_mahimahi()) return 0; if (system_rev > 0xC0) { /* CDMA version (except for EVT1) supports RGB666 */ init_tablep = samsung_oled_rgb666_init_table; init_table_sz = 
ARRAY_SIZE(samsung_oled_rgb666_init_table); mahimahi_lcdc_fb_data.output_format = MSM_MDP_OUT_IF_FMT_RGB666; } ret = platform_device_register(&msm_device_mdp); if (ret != 0) return ret; ret = mahimahi_init_spi_hack(); if (ret != 0) return ret; if (gpio_get_value(MAHIMAHI_GPIO_LCD_ID0)) { pr_info("%s: tft panel\n", __func__); vreg_lcm_rftx_2v6 = vreg_get(0, "rftx"); if (IS_ERR(vreg_lcm_rftx_2v6)) return PTR_ERR(vreg_lcm_rftx_2v6); vreg_set_level(vreg_lcm_rftx_2v6, 2600); vreg_lcm_aux_2v6 = vreg_get(0, "gp4"); if (IS_ERR(vreg_lcm_aux_2v6)) return PTR_ERR(vreg_lcm_aux_2v6); if (gpio_get_value(MAHIMAHI_GPIO_LCD_RST_N)) tft_panel_on = 1; ret = platform_device_register(&mahimahi_lcdc_tft_device); INIT_WORK(&brightness_delayed_work, mahimahi_brightness_tft_set_work); } else { pr_info("%s: amoled panel\n", __func__); ret = platform_device_register(&mahimahi_lcdc_amoled_device); INIT_WORK(&brightness_delayed_work, mahimahi_brightness_amoled_set_work); } if (ret != 0) return ret; ret = led_classdev_register(NULL, &mahimahi_brightness_led); if (ret != 0) { pr_err("%s: Cannot register brightness led\n", __func__); return ret; } return 0; } device_initcall(mahimahi_init_panel);
gpl-2.0
srfarias/Ultra-Kernel
net/ipv4/netfilter.c
4658
6369
/* IPv4 specific functions of netfilter core */ #include <linux/kernel.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/gfp.h> #include <linux/export.h> #include <net/route.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) { struct net *net = dev_net(skb_dst(skb)->dev); const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; unsigned int hh_len; if (addr_type == RTN_UNSPEC) addr_type = inet_addr_type(net, saddr); if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) flags |= FLOWI_FLAG_ANYSRC; else saddr = 0; /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. */ fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; fl4.flowi4_mark = skb->mark; fl4.flowi4_flags = flags; rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return -1; /* Drop old route. */ skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); if (skb_dst(skb)->error) return -1; #ifdef CONFIG_XFRM if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); if (IS_ERR(dst)) return -1; skb_dst_set(skb, dst); } #endif /* Change in oif may mean change in hh_len. 
*/ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), 0, GFP_ATOMIC)) return -1; return 0; } EXPORT_SYMBOL(ip_route_me_harder); #ifdef CONFIG_XFRM int ip_xfrm_me_harder(struct sk_buff *skb) { struct flowi fl; unsigned int hh_len; struct dst_entry *dst; if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return 0; if (xfrm_decode_session(skb, &fl, AF_INET) < 0) return -1; dst = skb_dst(skb); if (dst->xfrm) dst = ((struct xfrm_dst *)dst)->route; dst_hold(dst); dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0); if (IS_ERR(dst)) return -1; skb_dst_drop(skb); skb_dst_set(skb, dst); /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) return -1; return 0; } EXPORT_SYMBOL(ip_xfrm_me_harder); #endif void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); EXPORT_SYMBOL(ip_nat_decode_session); /* * Extra routing may needed on local out, as the QUEUE target never * returns control to the table. 
*/ struct ip_rt_info { __be32 daddr; __be32 saddr; u_int8_t tos; u_int32_t mark; }; static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_queue_entry *entry) { struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); if (entry->hook == NF_INET_LOCAL_OUT) { const struct iphdr *iph = ip_hdr(skb); rt_info->tos = iph->tos; rt_info->daddr = iph->daddr; rt_info->saddr = iph->saddr; rt_info->mark = skb->mark; } } static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) { const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); if (entry->hook == NF_INET_LOCAL_OUT) { const struct iphdr *iph = ip_hdr(skb); if (!(iph->tos == rt_info->tos && skb->mark == rt_info->mark && iph->daddr == rt_info->daddr && iph->saddr == rt_info->saddr)) return ip_route_me_harder(skb, RTN_UNSPEC); } return 0; } __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) break; if ((protocol == 0 && !csum_fold(skb->csum)) || !csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - dataoff, protocol, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; } /* fall through */ case CHECKSUM_NONE: if (protocol == 0) skb->csum = 0; else skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len - dataoff, protocol, 0); csum = __skb_checksum_complete(skb); } return csum; } EXPORT_SYMBOL(nf_ip_checksum); static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (len == skb->len - dataoff) return nf_ip_checksum(skb, hook, dataoff, protocol); /* fall through */ case CHECKSUM_NONE: skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, skb->len - 
dataoff, 0); skb->ip_summed = CHECKSUM_NONE; return __skb_checksum_complete_head(skb, dataoff + len); } return csum; } static int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict __always_unused) { struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); if (IS_ERR(rt)) return PTR_ERR(rt); *dst = &rt->dst; return 0; } static const struct nf_afinfo nf_ip_afinfo = { .family = AF_INET, .checksum = nf_ip_checksum, .checksum_partial = nf_ip_checksum_partial, .route = nf_ip_route, .saveroute = nf_ip_saveroute, .reroute = nf_ip_reroute, .route_key_size = sizeof(struct ip_rt_info), }; static int ipv4_netfilter_init(void) { return nf_register_afinfo(&nf_ip_afinfo); } static void ipv4_netfilter_fini(void) { nf_unregister_afinfo(&nf_ip_afinfo); } module_init(ipv4_netfilter_init); module_exit(ipv4_netfilter_fini); #ifdef CONFIG_SYSCTL struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = { { .procname = "net", }, { .procname = "ipv4", }, { .procname = "netfilter", }, { } }; EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path); #endif /* CONFIG_SYSCTL */
gpl-2.0
StefanescuCristian/hammerhead
drivers/net/veth.c
4914
9789
/* * drivers/net/veth.c * * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc * * Author: Pavel Emelianov <xemul@openvz.org> * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com> * */ #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/u64_stats_sync.h> #include <net/dst.h> #include <net/xfrm.h> #include <linux/veth.h> #include <linux/module.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" #define MIN_MTU 68 /* Min L3 MTU */ #define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */ struct veth_net_stats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; u64 rx_dropped; struct u64_stats_sync syncp; }; struct veth_priv { struct net_device *peer; struct veth_net_stats __percpu *stats; }; /* * ethtool interface */ static struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "peer_ifindex" }, }; static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { cmd->supported = 0; cmd->advertising = 0; ethtool_cmd_speed_set(cmd, SPEED_10000); cmd->duplex = DUPLEX_FULL; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; return 0; } static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch(stringset) { case ETH_SS_STATS: memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); break; } } static int veth_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys); default: return -EOPNOTSUPP; } } static void veth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct veth_priv *priv; priv = netdev_priv(dev); data[0] 
= priv->peer->ifindex; } static const struct ethtool_ops veth_ethtool_ops = { .get_settings = veth_get_settings, .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = veth_get_strings, .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, }; /* * xmit */ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_device *rcv = NULL; struct veth_priv *priv, *rcv_priv; struct veth_net_stats *stats, *rcv_stats; int length; priv = netdev_priv(dev); rcv = priv->peer; rcv_priv = netdev_priv(rcv); stats = this_cpu_ptr(priv->stats); rcv_stats = this_cpu_ptr(rcv_priv->stats); /* don't change ip_summed == CHECKSUM_PARTIAL, as that will cause bad checksum on forwarded packets */ if (skb->ip_summed == CHECKSUM_NONE && rcv->features & NETIF_F_RXCSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; length = skb->len; if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) goto rx_drop; u64_stats_update_begin(&stats->syncp); stats->tx_bytes += length; stats->tx_packets++; u64_stats_update_end(&stats->syncp); u64_stats_update_begin(&rcv_stats->syncp); rcv_stats->rx_bytes += length; rcv_stats->rx_packets++; u64_stats_update_end(&rcv_stats->syncp); return NETDEV_TX_OK; rx_drop: u64_stats_update_begin(&rcv_stats->syncp); rcv_stats->rx_dropped++; u64_stats_update_end(&rcv_stats->syncp); return NETDEV_TX_OK; } /* * general routines */ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct veth_priv *priv = netdev_priv(dev); int cpu; for_each_possible_cpu(cpu) { struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu); u64 rx_packets, rx_bytes, rx_dropped; u64 tx_packets, tx_bytes; unsigned int start; do { start = u64_stats_fetch_begin_bh(&stats->syncp); rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; rx_dropped = stats->rx_dropped; } while (u64_stats_fetch_retry_bh(&stats->syncp, 
start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; tot->rx_dropped += rx_dropped; } return tot; } static int veth_open(struct net_device *dev) { struct veth_priv *priv; priv = netdev_priv(dev); if (priv->peer == NULL) return -ENOTCONN; if (priv->peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(priv->peer); } return 0; } static int veth_close(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); netif_carrier_off(dev); netif_carrier_off(priv->peer); return 0; } static int is_valid_veth_mtu(int new_mtu) { return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU; } static int veth_change_mtu(struct net_device *dev, int new_mtu) { if (!is_valid_veth_mtu(new_mtu)) return -EINVAL; dev->mtu = new_mtu; return 0; } static int veth_dev_init(struct net_device *dev) { struct veth_net_stats __percpu *stats; struct veth_priv *priv; stats = alloc_percpu(struct veth_net_stats); if (stats == NULL) return -ENOMEM; priv = netdev_priv(dev); priv->stats = stats; return 0; } static void veth_dev_free(struct net_device *dev) { struct veth_priv *priv; priv = netdev_priv(dev); free_percpu(priv->stats); free_netdev(dev); } static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, .ndo_stop = veth_close, .ndo_start_xmit = veth_xmit, .ndo_change_mtu = veth_change_mtu, .ndo_get_stats64 = veth_get_stats64, .ndo_set_mac_address = eth_mac_addr, }; static void veth_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; dev->destructor = veth_dev_free; dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; } /* * netlink interface */ static int veth_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if 
(!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (tb[IFLA_MTU]) { if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU]))) return -EINVAL; } return 0; } static struct rtnl_link_ops veth_link_ops; static int veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { int err; struct net_device *peer; struct veth_priv *priv; char ifname[IFNAMSIZ]; struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; struct ifinfomsg *ifmp; struct net *net; /* * create and register peer first */ if (data != NULL && data[VETH_INFO_PEER] != NULL) { struct nlattr *nla_peer; nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); err = nla_parse(peer_tb, IFLA_MAX, nla_data(nla_peer) + sizeof(struct ifinfomsg), nla_len(nla_peer) - sizeof(struct ifinfomsg), ifla_policy); if (err < 0) return err; err = veth_validate(peer_tb, NULL); if (err < 0) return err; tbp = peer_tb; } else { ifmp = NULL; tbp = tb; } if (tbp[IFLA_IFNAME]) nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); else snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); net = rtnl_link_get_net(src_net, tbp); if (IS_ERR(net)) return PTR_ERR(net); peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp); if (IS_ERR(peer)) { put_net(net); return PTR_ERR(peer); } if (tbp[IFLA_ADDRESS] == NULL) eth_hw_addr_random(peer); err = register_netdevice(peer); put_net(net); net = NULL; if (err < 0) goto err_register_peer; netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp); if (err < 0) goto err_configure_peer; /* * register dev last * * note, that since we've registered new device the dev's name * should be re-allocated */ if (tb[IFLA_ADDRESS] == NULL) eth_hw_addr_random(dev); if (tb[IFLA_IFNAME]) nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); if (strchr(dev->name, '%')) { err = dev_alloc_name(dev, dev->name); if (err < 0) goto err_alloc_name; } err = register_netdevice(dev); if (err < 0) goto err_register_dev; 
netif_carrier_off(dev); /* * tie the deviced together */ priv = netdev_priv(dev); priv->peer = peer; priv = netdev_priv(peer); priv->peer = dev; return 0; err_register_dev: /* nothing to do */ err_alloc_name: err_configure_peer: unregister_netdevice(peer); return err; err_register_peer: free_netdev(peer); return err; } static void veth_dellink(struct net_device *dev, struct list_head *head) { struct veth_priv *priv; struct net_device *peer; priv = netdev_priv(dev); peer = priv->peer; unregister_netdevice_queue(dev, head); unregister_netdevice_queue(peer, head); } static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = { [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, }; static struct rtnl_link_ops veth_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct veth_priv), .setup = veth_setup, .validate = veth_validate, .newlink = veth_newlink, .dellink = veth_dellink, .policy = veth_policy, .maxtype = VETH_INFO_MAX, }; /* * init/fini */ static __init int veth_init(void) { return rtnl_link_register(&veth_link_ops); } static __exit void veth_exit(void) { rtnl_link_unregister(&veth_link_ops); } module_init(veth_init); module_exit(veth_exit); MODULE_DESCRIPTION("Virtual Ethernet Tunnel"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_RTNL_LINK(DRV_NAME);
gpl-2.0
Shaky156/Tegra-Note-7
drivers/video/via/via_aux_vt1622.c
9778
1358
/*
 * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation;
 * either version 2, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
 * the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE.See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
 * driver for VIA VT1622(M) Digital TV Encoder
 */

#include <linux/slab.h>
#include "via_aux.h"


static const char *name = "VT1622(M) Digital TV Encoder";


/*
 * Check one bus address for a VT1622 and register it if found.
 * The chip identifies itself by returning 0x03 from register 0x1B.
 */
static void probe(struct via_aux_bus *bus, u8 addr)
{
	struct via_aux_drv drv = {
		.bus	= bus,
		.addr	= addr,
		.name	= name,
	};
	u8 id;

	if (!via_aux_read(&drv, 0x1B, &id, 1))
		return;
	if (id != 0x03)
		return;

	printk(KERN_INFO "viafb: Found %s at address 0x%x\n", name, addr);
	via_aux_add(&drv);
}

/* Probe both I2C addresses the encoder can occupy. */
void via_aux_vt1622_probe(struct via_aux_bus *bus)
{
	probe(bus, 0x20);
	probe(bus, 0x21);
}
gpl-2.0
AD5GB/google_kernel_hammerhead
drivers/video/via/via_aux_vt1625.c
9778
1345
/*
 * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation;
 * either version 2, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
 * the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE.See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
 * driver for VIA VT1625(M) HDTV Encoder
 */

#include <linux/slab.h>
#include "via_aux.h"


static const char *name = "VT1625(M) HDTV Encoder";


/*
 * Check one bus address for a VT1625 and register it if found.
 * The chip identifies itself by returning 0x50 from register 0x1B.
 */
static void probe(struct via_aux_bus *bus, u8 addr)
{
	struct via_aux_drv drv = {
		.bus	= bus,
		.addr	= addr,
		.name	= name,
	};
	u8 id;

	if (!via_aux_read(&drv, 0x1B, &id, 1))
		return;
	if (id != 0x50)
		return;

	printk(KERN_INFO "viafb: Found %s at address 0x%x\n", name, addr);
	via_aux_add(&drv);
}

/* Probe both I2C addresses the encoder can occupy. */
void via_aux_vt1625_probe(struct via_aux_bus *bus)
{
	probe(bus, 0x20);
	probe(bus, 0x21);
}
gpl-2.0
marlontoe/android_kernel_sony_msm8974
drivers/staging/rtl8192u/r8180_93cx6.c
9778
3495
/*
   This files contains card eeprom (93c46 or 93c56) programming routines,
   memory is addressed by 16 bits words.

   This is part of rtl8180 OpenSource driver.
   Copyright (C) Andrea Merello 2004  <andreamrl@tiscali.it>
   Released under the terms of GPL (General Public Licence)

   Parts of this driver are based on the GPL part of the
   official realtek driver.

   Parts of this driver are based on the rtl8180 driver skeleton
   from Patric Schenke & Andres Salomon.

   Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.

   We want to thank the Authors of those projects and the Ndiswrapper
   project Authors.
*/

#include "r8180_93cx6.h"

/* Drive the EEPROM chip-select line: non-zero selects, zero deselects. */
void eprom_cs(struct net_device *dev, short bit)
{
	if(bit)
		write_nic_byte_E(dev, EPROM_CMD,
			       (1<<EPROM_CS_SHIFT) | \
			       read_nic_byte_E(dev, EPROM_CMD)); //enable EPROM
	else
		write_nic_byte_E(dev, EPROM_CMD, read_nic_byte_E(dev, EPROM_CMD)\
			       &~(1<<EPROM_CS_SHIFT)); //disable EPROM

	force_pci_posting(dev);
	udelay(EPROM_DELAY);
}

/* Produce one full clock cycle on the EEPROM serial clock (raise then lower CK). */
void eprom_ck_cycle(struct net_device *dev)
{
	write_nic_byte_E(dev, EPROM_CMD,
		       (1<<EPROM_CK_SHIFT) | read_nic_byte_E(dev,EPROM_CMD));
	force_pci_posting(dev);
	udelay(EPROM_DELAY);
	write_nic_byte_E(dev, EPROM_CMD,
		       read_nic_byte_E(dev, EPROM_CMD) &~ (1<<EPROM_CK_SHIFT));
	force_pci_posting(dev);
	udelay(EPROM_DELAY);
}

/* Set the EEPROM data-in line to the given bit value (non-zero = 1). */
void eprom_w(struct net_device *dev,short bit)
{
	if(bit)
		write_nic_byte_E(dev, EPROM_CMD, (1<<EPROM_W_SHIFT) | \
			       read_nic_byte_E(dev,EPROM_CMD));
	else
		write_nic_byte_E(dev, EPROM_CMD, read_nic_byte_E(dev,EPROM_CMD)\
			       &~(1<<EPROM_W_SHIFT));

	force_pci_posting(dev);
	udelay(EPROM_DELAY);
}

/* Sample the EEPROM data-out line; returns 1 or 0. */
short eprom_r(struct net_device *dev)
{
	short bit;

	bit=(read_nic_byte_E(dev, EPROM_CMD) & (1<<EPROM_R_SHIFT) );
	udelay(EPROM_DELAY);

	if(bit) return 1;
	return 0;
}

/* Shift out 'len' bits, one clock cycle each; non-zero entries count as 1. */
void eprom_send_bits_string(struct net_device *dev, short b[], int len)
{
	int i;

	for(i=0; i<len; i++){
		eprom_w(dev, b[i]);
		eprom_ck_cycle(dev);
	}
}

/*
 * Read one 16-bit word from the serial EEPROM at word address 'addr'.
 * Bit-bangs the 93c46/93c56 READ protocol: start bit + opcode (1,1,0),
 * then the address MSB-first (8 address bits for 93c56, 6 for 93c46),
 * then clocks in 16 data bits MSB-first.
 */
u32 eprom_read(struct net_device *dev, u32 addr)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	short read_cmd[]={1,1,0};
	short addr_str[8];
	int i;
	int addr_len;
	u32 ret;

	ret=0;
	//enable EPROM programming
	write_nic_byte_E(dev, EPROM_CMD,
		       (EPROM_CMD_PROGRAM<<EPROM_CMD_OPERATING_MODE_SHIFT));
	force_pci_posting(dev);
	udelay(EPROM_DELAY);

	/* Decompose the address MSB-first; the raw mask values are fine
	 * because eprom_w() treats any non-zero value as logic 1. */
	if (priv->epromtype==EPROM_93c56){
		addr_str[7]=addr & 1;
		addr_str[6]=addr & (1<<1);
		addr_str[5]=addr & (1<<2);
		addr_str[4]=addr & (1<<3);
		addr_str[3]=addr & (1<<4);
		addr_str[2]=addr & (1<<5);
		addr_str[1]=addr & (1<<6);
		addr_str[0]=addr & (1<<7);
		addr_len=8;
	}else{
		addr_str[5]=addr & 1;
		addr_str[4]=addr & (1<<1);
		addr_str[3]=addr & (1<<2);
		addr_str[2]=addr & (1<<3);
		addr_str[1]=addr & (1<<4);
		addr_str[0]=addr & (1<<5);
		addr_len=6;
	}
	eprom_cs(dev, 1);
	eprom_ck_cycle(dev);
	eprom_send_bits_string(dev, read_cmd, 3);
	eprom_send_bits_string(dev, addr_str, addr_len);

	//keep chip pin D to low state while reading.
	//I'm unsure if it is necessary, but anyway shouldn't hurt
	eprom_w(dev, 0);

	for(i=0;i<16;i++){
		//eeprom needs a clk cycle between writing opcode&adr
		//and reading data. (eeprom outs a dummy 0)
		eprom_ck_cycle(dev);
		ret |= (eprom_r(dev)<<(15-i));
	}

	eprom_cs(dev, 0);
	eprom_ck_cycle(dev);

	//disable EPROM programming
	write_nic_byte_E(dev, EPROM_CMD,
		       (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
	return ret;
}
gpl-2.0
yueshu/kernel_rk3036
drivers/net/wireless/bcmdhd/dhd_flowring.c
51
27547
/* * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level * * Flow rings are transmit traffic (=propagating towards antenna) related entities * * * Copyright (C) 1999-2016, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * * <<Broadcom-WL-IPTag/Open:>> * * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $ */ #include <typedefs.h> #include <bcmutils.h> #include <bcmendian.h> #include <bcmdevs.h> #include <proto/ethernet.h> #include <proto/bcmevent.h> #include <dngl_stats.h> #include <dhd.h> #include <dhd_flowring.h> #include <dhd_bus.h> #include <dhd_proto.h> #include <dhd_dbg.h> #include <proto/802.1d.h> #include <pcie_core.h> #include <bcmmsgbuf.h> #include <dhd_pcie.h> static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue); static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da); static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da); static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da, uint16 *flowid); int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); #define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) #define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) #ifdef DHD_LOSSLESS_ROAMING const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; #else const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; #endif const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; /** Queue overflow throttle. 
    Return value: TRUE if throttle needs to be applied */
static INLINE int
dhd_flow_queue_throttle(flow_queue_t *queue)
{
	/* Throttle as soon as the backup queue reaches its configured maximum. */
	return DHD_FLOW_QUEUE_FULL(queue);
}

/* Default enqueue-overflow callback: reject the packet with a resource error. */
int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
{
	return BCME_NORESOURCE;
}

/** Returns flow ring given a flowid */
flow_ring_node_t *
dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
{
	flow_ring_node_t * flow_ring_node;

	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(flowid < dhdp->num_flow_rings);

	/* The flow ring table is indexed directly by flowid. */
	flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
	ASSERT(flow_ring_node->flowid == flowid);
	return flow_ring_node;
}

/** Returns 'backup' queue given a flowid */
flow_queue_t *
dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
	flow_ring_node_t * flow_ring_node;

	flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
	return &flow_ring_node->queue;
}

/* Flow ring's queue management functions */

/** Initialize a flow ring's queue, called on driver initialization. */
void
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
	ASSERT((queue != NULL) && (max > 0));

	dll_init(&queue->list);
	queue->head = queue->tail = NULL;
	queue->len = 0;

	/* Set queue's threshold and queue's parent cummulative length counter */
	ASSERT(max > 1);
	DHD_FLOW_QUEUE_SET_MAX(queue, max);
	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
	DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);

	queue->failures = 0U;
	/* default overflow handler; replaceable via dhd_flow_queue_register() */
	queue->cb = &dhd_flow_queue_overflow;
}

/** Register an enqueue overflow callback handler */
void
dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
	ASSERT(queue != NULL);
	queue->cb = cb;
}

/**
 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
 * to the flow ring itself.
 */
int BCMFASTPATH
dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
	int ret = BCME_OK;

	ASSERT(queue != NULL);

	if (dhd_flow_queue_throttle(queue)) {
		queue->failures++;
		/* Queue is full: hand the packet to the registered overflow callback. */
		ret = (*queue->cb)(queue, pkt);
		goto done;
	}

	if (queue->head) {
		FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
	} else {
		queue->head = pkt;
	}

	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);

	queue->tail = pkt; /* at tail */

	queue->len++;
	/* increment parent's cummulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));

done:
	return ret;
}

/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
void * BCMFASTPATH
dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
{
	void * pkt;

	ASSERT(queue != NULL);

	pkt = queue->head; /* from head */

	if (pkt == NULL) {
		/* Empty queue: tail and length must be consistent with head. */
		ASSERT((queue->len == 0) && (queue->tail == NULL));
		goto done;
	}

	queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
	if (queue->head == NULL)
		queue->tail = NULL;

	queue->len--;
	/* decrement parent's cummulative length */
	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));

	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */

done:
	return pkt;
}

/** Reinsert a dequeued 802.3 packet back at the head */
void BCMFASTPATH
dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
	if (queue->head == NULL) {
		queue->tail = pkt;
	}

	FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
	queue->head = pkt;

	queue->len++;
	/* increment parent's cummulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
}

/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void
dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
                     int queue_budget, int cumm_threshold, void *cumm_ctr)
{
	flow_queue_t * queue;

	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(queue_budget > 1);
	ASSERT(cumm_threshold > 1);
	ASSERT(cumm_ctr != (void*)NULL);

	queue = dhd_flow_queue(dhdp, flowid);

	DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

	/* Set the queue's parent threshold and cummulative counter
*/ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold); DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr); } /** Initializes data structures of multiple flow rings */ int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) { uint32 idx; uint32 flow_ring_table_sz; uint32 if_flow_lkup_sz = 0; void * flowid_allocator; flow_ring_table_t *flow_ring_table = NULL; if_flow_lkup_t *if_flow_lkup = NULL; void *lock = NULL; void *list_lock = NULL; unsigned long flags; DHD_INFO(("%s\n", __FUNCTION__)); /* Construct a 16bit flowid allocator */ flowid_allocator = id16_map_init(dhdp->osh, num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED); if (flowid_allocator == NULL) { DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__)); return BCME_NOMEM; } /* Allocate a flow ring table, comprising of requested number of rings */ flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t)); flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz); if (flow_ring_table == NULL) { DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__)); goto fail; } /* Initialize flow ring table state */ DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr); bzero((uchar *)flow_ring_table, flow_ring_table_sz); for (idx = 0; idx < num_flow_rings; idx++) { flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED; flow_ring_table[idx].flowid = (uint16)idx; flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh); if (flow_ring_table[idx].lock == NULL) { DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__)); goto fail; } dll_init(&flow_ring_table[idx].list); /* Initialize the per flow ring backup queue */ dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue, FLOW_RING_QUEUE_THRESHOLD); } /* Allocate per interface hash table (for fast lookup from interface to flow ring) */ if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz); if (if_flow_lkup == NULL) { DHD_ERROR(("%s: if 
flow lkup alloc failure\n", __FUNCTION__)); goto fail; } /* Initialize per interface hash table */ for (idx = 0; idx < DHD_MAX_IFS; idx++) { int hash_ix; if_flow_lkup[idx].status = 0; if_flow_lkup[idx].role = 0; for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++) if_flow_lkup[idx].fl_hash[hash_ix] = NULL; } lock = dhd_os_spin_lock_init(dhdp->osh); if (lock == NULL) goto fail; list_lock = dhd_os_spin_lock_init(dhdp->osh); if (list_lock == NULL) goto lock_fail; dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP; bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); #ifdef DHD_LOSSLESS_ROAMING dhdp->dequeue_prec_map = ALLPRIO; #endif /* Now populate into dhd pub */ DHD_FLOWID_LOCK(lock, flags); dhdp->num_flow_rings = num_flow_rings; dhdp->flowid_allocator = (void *)flowid_allocator; dhdp->flow_ring_table = (void *)flow_ring_table; dhdp->if_flow_lkup = (void *)if_flow_lkup; dhdp->flowid_lock = lock; dhdp->flow_rings_inited = TRUE; dhdp->flowring_list_lock = list_lock; DHD_FLOWID_UNLOCK(lock, flags); DHD_INFO(("%s done\n", __FUNCTION__)); return BCME_OK; lock_fail: /* deinit the spinlock */ dhd_os_spin_lock_deinit(dhdp->osh, lock); fail: /* Destruct the per interface flow lkup table */ if (if_flow_lkup != NULL) { DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz); } if (flow_ring_table != NULL) { for (idx = 0; idx < num_flow_rings; idx++) { if (flow_ring_table[idx].lock != NULL) dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); } MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); } id16_map_fini(dhdp->osh, flowid_allocator); return BCME_NOMEM; } /** Deinit Flow Ring specific data structures */ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) { uint16 idx; uint32 flow_ring_table_sz; uint32 if_flow_lkup_sz; flow_ring_table_t *flow_ring_table; unsigned long flags; void *lock; DHD_INFO(("dhd_flow_rings_deinit\n")); if (!(dhdp->flow_rings_inited)) { DHD_ERROR(("dhd_flow_rings not initialized!\n")); return; } if (dhdp->flow_ring_table != NULL) { 
ASSERT(dhdp->num_flow_rings > 0); DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; dhdp->flow_ring_table = NULL; DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); for (idx = 0; idx < dhdp->num_flow_rings; idx++) { if (flow_ring_table[idx].active) { dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]); } ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue)); /* Deinit flow ring queue locks before destroying flow ring table */ dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); flow_ring_table[idx].lock = NULL; } /* Destruct the flow ring table */ flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t); MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); } DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); /* Destruct the per interface flow lkup table */ if (dhdp->if_flow_lkup != NULL) { if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz); DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz); dhdp->if_flow_lkup = NULL; } /* Destruct the flowid allocator */ if (dhdp->flowid_allocator != NULL) dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator); dhdp->num_flow_rings = 0U; bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); lock = dhdp->flowid_lock; dhdp->flowid_lock = NULL; DHD_FLOWID_UNLOCK(lock, flags); dhd_os_spin_lock_deinit(dhdp->osh, lock); dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock); dhdp->flowring_list_lock = NULL; ASSERT(dhdp->if_flow_lkup == NULL); ASSERT(dhdp->flowid_allocator == NULL); ASSERT(dhdp->flow_ring_table == NULL); dhdp->flow_rings_inited = FALSE; } /** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */ uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex) { if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; ASSERT(if_flow_lkup); return if_flow_lkup[ifindex].role; } #ifdef WLTDLS bool is_tdls_destination(dhd_pub_t 
*dhdp, uint8 *da) { tdls_peer_node_t *cur = dhdp->peer_tbl.node; while (cur != NULL) { if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { return TRUE; } cur = cur->next; } return FALSE; } #endif /* WLTDLS */ /** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */ static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) { int hash; bool ismcast = FALSE; flow_hash_info_t *cur; if_flow_lkup_t *if_flow_lkup; unsigned long flags; DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; ASSERT(if_flow_lkup); if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) && is_tdls_destination(dhdp, da)) { hash = DHD_FLOWRING_HASHINDEX(da, prio); cur = if_flow_lkup[ifindex].fl_hash[hash]; while (cur != NULL) { if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) { DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return cur->flowid; } cur = cur->next; } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return FLOWID_INVALID; } #endif /* WLTDLS */ cur = if_flow_lkup[ifindex].fl_hash[prio]; if (cur) { DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return cur->flowid; } } else { if (ETHER_ISMULTI(da)) { ismcast = TRUE; hash = 0; } else { hash = DHD_FLOWRING_HASHINDEX(da, prio); } cur = if_flow_lkup[ifindex].fl_hash[hash]; while (cur) { if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) || (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) && (cur->flow_info.tid == prio))) { DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return cur->flowid; } cur = cur->next; } } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__)); return FLOWID_INVALID; } /* dhd_flowid_find */ /** Create unique Flow ID, called when a flow ring is created. 
*/ static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) { flow_hash_info_t *fl_hash_node, *cur; if_flow_lkup_t *if_flow_lkup; int hash; uint16 flowid; unsigned long flags; fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t)); memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da)); DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); ASSERT(dhdp->flowid_allocator != NULL); flowid = id16_map_alloc(dhdp->flowid_allocator); DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); if (flowid == FLOWID_INVALID) { MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t)); DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__)); return FLOWID_INVALID; } fl_hash_node->flowid = flowid; fl_hash_node->flow_info.tid = prio; fl_hash_node->flow_info.ifindex = ifindex; fl_hash_node->next = NULL; DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { /* For STA non TDLS dest we allocate entry based on prio only */ #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && (is_tdls_destination(dhdp, da))) { hash = DHD_FLOWRING_HASHINDEX(da, prio); cur = if_flow_lkup[ifindex].fl_hash[hash]; if (cur) { while (cur->next) { cur = cur->next; } cur->next = fl_hash_node; } else { if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node; } } else #endif /* WLTDLS */ if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node; } else { /* For bcast/mcast assign first slot in in interface */ hash = ETHER_ISMULTI(da) ? 
0 : DHD_FLOWRING_HASHINDEX(da, prio); cur = if_flow_lkup[ifindex].fl_hash[hash]; if (cur) { while (cur->next) { cur = cur->next; } cur->next = fl_hash_node; } else if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node; } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid)); return fl_hash_node->flowid; } /* dhd_flowid_alloc */ /** Get flow ring ID, if not present try to create one */ static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da, uint16 *flowid) { uint16 id; flow_ring_node_t *flow_ring_node; flow_ring_table_t *flow_ring_table; unsigned long flags; int ret; DHD_INFO(("%s\n", __FUNCTION__)); if (!dhdp->flow_ring_table) { return BCME_ERROR; } flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; id = dhd_flowid_find(dhdp, ifindex, prio, sa, da); if (id == FLOWID_INVALID) { if_flow_lkup_t *if_flow_lkup; if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; if (!if_flow_lkup[ifindex].status) return BCME_ERROR; id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da); if (id == FLOWID_INVALID) { DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n", __FUNCTION__, ifindex, if_flow_lkup[ifindex].status)); return BCME_ERROR; } /* register this flowid in dhd_pub */ dhd_add_flowid(dhdp, ifindex, prio, da, id); ASSERT(id < dhdp->num_flow_rings); flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); /* Init Flow info */ memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa)); memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da)); flow_ring_node->flow_info.tid = prio; flow_ring_node->flow_info.ifindex = ifindex; flow_ring_node->active = TRUE; flow_ring_node->status = FLOW_RING_STATUS_PENDING; DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); /* Create and inform device about the new flow */ if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node) 
!= BCME_OK) { DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id)); return BCME_ERROR; } *flowid = id; return BCME_OK; } else { /* if the Flow id was found in the hash */ ASSERT(id < dhdp->num_flow_rings); flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); /* * If the flow_ring_node is in Open State or Status pending state then * we can return the Flow id to the caller.If the flow_ring_node is in * FLOW_RING_STATUS_PENDING this means the creation is in progress and * hence the packets should be queued. * * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or * FLOW_RING_STATUS_CLOSED, then we should return Error. * Note that if the flowing is being deleted we would mark it as * FLOW_RING_STATUS_DELETE_PENDING. Now before Dongle could respond and * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets. * We should drop the packets in that case. * The decission to return OK should NOT be based on 'active' variable, beause * active is made TRUE when a flow_ring_node gets allocated and is made * FALSE when the flow ring gets removed and does not reflect the True state * of the Flow ring. */ if (flow_ring_node->status == FLOW_RING_STATUS_OPEN || flow_ring_node->status == FLOW_RING_STATUS_PENDING) { *flowid = id; ret = BCME_OK; } else { *flowid = FLOWID_INVALID; ret = BCME_ERROR; } DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); return ret; } /* Flow Id found in the hash */ } /* dhd_flowid_lookup */ /** * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to * select the flowring to send the packet to the dongle. 
*/ int BCMFASTPATH dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) { uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); struct ether_header *eh = (struct ether_header *)pktdata; uint16 flowid; ASSERT(ifindex < DHD_MAX_IFS); if (ifindex >= DHD_MAX_IFS) { return BCME_BADARG; } if (!dhdp->flowid_allocator) { DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); return BCME_ERROR; } if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost, &flowid) != BCME_OK) { return BCME_ERROR; } DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid)); /* Tag the packet with flowid */ DHD_PKT_SET_FLOWID(pktbuf, flowid); return BCME_OK; } void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) { int hashix; bool found = FALSE; flow_hash_info_t *cur, *prev; if_flow_lkup_t *if_flow_lkup; unsigned long flags; DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) { cur = if_flow_lkup[ifindex].fl_hash[hashix]; if (cur) { if (cur->flowid == flowid) { found = TRUE; } prev = NULL; while (!found && cur) { if (cur->flowid == flowid) { found = TRUE; break; } prev = cur; cur = cur->next; } if (found) { if (!prev) { if_flow_lkup[ifindex].fl_hash[hashix] = cur->next; } else { prev->next = cur->next; } /* deregister flowid from dhd_pub. */ dhd_del_flowid(dhdp, ifindex, flowid); id16_map_free(dhdp->flowid_allocator, flowid); DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t)); return; } } } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n", __FUNCTION__, flowid)); } /* dhd_flowid_free */ /** * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle * indicates that a wireless link has gone down. 
*/ void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) { uint32 id; flow_ring_table_t *flow_ring_table; DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex)); ASSERT(ifindex < DHD_MAX_IFS); if (ifindex >= DHD_MAX_IFS) return; if (!dhdp->flow_ring_table) return; flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; for (id = 0; id < dhdp->num_flow_rings; id++) { if (flow_ring_table[id].active && (flow_ring_table[id].flow_info.ifindex == ifindex)) { dhd_bus_flow_ring_delete_request(dhdp->bus, (void *) &flow_ring_table[id]); } } } /** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */ void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) { uint32 id; flow_ring_table_t *flow_ring_table; DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex)); ASSERT(ifindex < DHD_MAX_IFS); if (ifindex >= DHD_MAX_IFS) return; if (!dhdp->flow_ring_table) return; flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; for (id = 0; id < dhdp->num_flow_rings; id++) { if (flow_ring_table[id].active && (flow_ring_table[id].flow_info.ifindex == ifindex) && (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) { DHD_INFO(("%s: deleting flowid %d\n", __FUNCTION__, flow_ring_table[id].flowid)); dhd_bus_flow_ring_delete_request(dhdp->bus, (void *) &flow_ring_table[id]); } } } /** Handles interface ADD, CHANGE, DEL indications from the dongle */ void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, uint8 op, uint8 role) { if_flow_lkup_t *if_flow_lkup; unsigned long flags; ASSERT(ifindex < DHD_MAX_IFS); if (ifindex >= DHD_MAX_IFS) return; DHD_INFO(("%s: ifindex %u op %u role is %u \n", __FUNCTION__, ifindex, op, role)); if (!dhdp->flowid_allocator) { DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); return; } DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; if (op == 
WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) { if_flow_lkup[ifindex].role = role; if (role != WLC_E_IF_ROLE_STA) { if_flow_lkup[ifindex].status = TRUE; DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n", __FUNCTION__, ifindex, role)); /* Create Mcast Flow */ } } else if (op == WLC_E_IF_DEL) { if_flow_lkup[ifindex].status = FALSE; DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n", __FUNCTION__, ifindex, role)); } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); } /** Handles a STA 'link' indication from the dongle */ int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) { if_flow_lkup_t *if_flow_lkup; unsigned long flags; ASSERT(ifindex < DHD_MAX_IFS); if (ifindex >= DHD_MAX_IFS) return BCME_BADARG; DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status)); DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { if (status) if_flow_lkup[ifindex].status = TRUE; else if_flow_lkup[ifindex].status = FALSE; } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return BCME_OK; } /** Update flow priority mapping, called on IOVAR */ int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) { uint16 flowid; flow_ring_node_t *flow_ring_node; if (map > DHD_FLOW_PRIO_LLR_MAP) return BCME_BADOPTION; /* Check if we need to change prio map */ if (map == dhdp->flow_prio_map_type) return BCME_OK; /* If any ring is active we cannot change priority mapping for flow rings */ for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { flow_ring_node = DHD_FLOW_RING(dhdp, flowid); if (flow_ring_node->active) return BCME_EPERM; } /* Inform firmware about new mapping type */ if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE)) return BCME_ERROR; /* update internal structures */ dhdp->flow_prio_map_type = map; if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP) bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); else bcopy(prio2ac, 
dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); return BCME_OK; } /** Inform firmware on updated flow priority mapping, called on IOVAR */ int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set) { uint8 iovbuf[24]; if (!set) { bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf)); if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) { DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__)); return BCME_ERROR; } *map = iovbuf[0]; return BCME_OK; } bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf)); if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { DHD_ERROR(("%s: failed to set fl_prio_map \n", __FUNCTION__)); return BCME_ERROR; } return BCME_OK; }
gpl-2.0
fulcrum7/mq107-kernel
drivers/md/bitmap.c
51
55962
/* * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 * * bitmap_create - sets up the bitmap structure * bitmap_destroy - destroys the bitmap structure * * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.: * - added disk storage for bitmap * - changes to allow various bitmap chunk sizes */ /* * Still to do: * * flush after percent set rather than just time based. (maybe both). */ #include <linux/blkdev.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/buffer_head.h> #include "md.h" #include "bitmap.h" #include <linux/dm-dirty-log.h> /* debug macros */ #define DEBUG 0 #if DEBUG /* these are for debugging purposes only! */ /* define one and only one of these */ #define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */ #define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/ #define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */ #define INJECT_FAULTS_4 0 /* undef */ #define INJECT_FAULTS_5 0 /* undef */ #define INJECT_FAULTS_6 0 /* if these are defined, the driver will fail! debug only */ #define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */ #define INJECT_FATAL_FAULT_2 0 /* undef */ #define INJECT_FATAL_FAULT_3 0 /* undef */ #endif #ifndef PRINTK # if DEBUG > 0 # define PRINTK(x...) printk(KERN_DEBUG x) # else # define PRINTK(x...) # endif #endif static inline char *bmname(struct bitmap *bitmap) { return bitmap->mddev ? 
mdname(bitmap->mddev) : "mdX"; } /* * just a placeholder - calls kmalloc for bitmap pages */ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap) { unsigned char *page; #ifdef INJECT_FAULTS_1 page = NULL; #else page = kzalloc(PAGE_SIZE, GFP_NOIO); #endif if (!page) printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap)); else PRINTK("%s: bitmap_alloc_page: allocated page at %p\n", bmname(bitmap), page); return page; } /* * for now just a placeholder -- just calls kfree for bitmap pages */ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page) { PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page); kfree(page); } /* * check a page and, if necessary, allocate it (or hijack it if the alloc fails) * * 1) check to see if this page is allocated, if it's not then try to alloc * 2) if the alloc fails, set the page's hijacked flag so we'll use the * page pointer directly as a counter * * if we find our page, we increment the page's refcount so that it stays * allocated while we're using it */ static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create) __releases(bitmap->lock) __acquires(bitmap->lock) { unsigned char *mappage; if (page >= bitmap->pages) { /* This can happen if bitmap_start_sync goes beyond * End-of-device while looking for a whole page. * It is harmless. 
*/ return -EINVAL; } if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ return 0; if (bitmap->bp[page].map) /* page is already allocated, just return */ return 0; if (!create) return -ENOENT; /* this page has not been allocated yet */ spin_unlock_irq(&bitmap->lock); mappage = bitmap_alloc_page(bitmap); spin_lock_irq(&bitmap->lock); if (mappage == NULL) { PRINTK("%s: bitmap map page allocation failed, hijacking\n", bmname(bitmap)); /* failed - set the hijacked flag so that we can use the * pointer as a counter */ if (!bitmap->bp[page].map) bitmap->bp[page].hijacked = 1; } else if (bitmap->bp[page].map || bitmap->bp[page].hijacked) { /* somebody beat us to getting the page */ bitmap_free_page(bitmap, mappage); return 0; } else { /* no page was in place and we have one, so install it */ bitmap->bp[page].map = mappage; bitmap->missing_pages--; } return 0; } /* if page is completely empty, put it back on the free list, or dealloc it */ /* if page was hijacked, unmark the flag so it might get alloced next time */ /* Note: lock should be held when calling this */ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) { char *ptr; if (bitmap->bp[page].count) /* page is still busy */ return; /* page is no longer in use, it can be released */ if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ bitmap->bp[page].hijacked = 0; bitmap->bp[page].map = NULL; } else { /* normal case, free the page */ ptr = bitmap->bp[page].map; bitmap->bp[page].map = NULL; bitmap->missing_pages++; bitmap_free_page(bitmap, ptr); } } /* * bitmap file handling - read and write the bitmap file and its superblock */ /* * basic page I/O operations */ /* IO operations when bitmap is stored near all superblocks */ static struct page *read_sb_page(mddev_t *mddev, loff_t offset, struct page *page, unsigned long index, int size) { /* choose a good rdev and read the page from there */ mdk_rdev_t *rdev; sector_t target; int did_alloc = 0; if (!page) { 
page = alloc_page(GFP_KERNEL); if (!page) return ERR_PTR(-ENOMEM); did_alloc = 1; } list_for_each_entry(rdev, &mddev->disks, same_set) { if (! test_bit(In_sync, &rdev->flags) || test_bit(Faulty, &rdev->flags)) continue; target = rdev->sb_start + offset + index * (PAGE_SIZE/512); if (sync_page_io(rdev, target, roundup(size, bdev_logical_block_size(rdev->bdev)), page, READ)) { page->index = index; attach_page_buffers(page, NULL); /* so that free_buffer will * quietly no-op */ return page; } } if (did_alloc) put_page(page); return ERR_PTR(-EIO); } static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev) { /* Iterate the disks of an mddev, using rcu to protect access to the * linked list, and raising the refcount of devices we return to ensure * they don't disappear while in use. * As devices are only added or removed when raid_disk is < 0 and * nr_pending is 0 and In_sync is clear, the entries we return will * still be in the same position on the list when we re-enter * list_for_each_continue_rcu. */ struct list_head *pos; rcu_read_lock(); if (rdev == NULL) /* start at the beginning */ pos = &mddev->disks; else { /* release the previous rdev and start from there. 
*/ rdev_dec_pending(rdev, mddev); pos = &rdev->same_set; } list_for_each_continue_rcu(pos, &mddev->disks) { rdev = list_entry(pos, mdk_rdev_t, same_set); if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { /* this is a usable devices */ atomic_inc(&rdev->nr_pending); rcu_read_unlock(); return rdev; } } rcu_read_unlock(); return NULL; } static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) { mdk_rdev_t *rdev = NULL; mddev_t *mddev = bitmap->mddev; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; loff_t offset = mddev->bitmap_info.offset; if (page->index == bitmap->file_pages-1) size = roundup(bitmap->last_page_size, bdev_logical_block_size(rdev->bdev)); /* Just make sure we aren't corrupting data or * metadata */ if (mddev->external) { /* Bitmap could be anywhere. */ if (rdev->sb_start + offset + (page->index * (PAGE_SIZE/512)) > rdev->data_offset && rdev->sb_start + offset < (rdev->data_offset + mddev->dev_sectors + (PAGE_SIZE/512))) goto bad_alignment; } else if (offset < 0) { /* DATA BITMAP METADATA */ if (offset + (long)(page->index * (PAGE_SIZE/512)) + size/512 > 0) /* bitmap runs in to metadata */ goto bad_alignment; if (rdev->data_offset + mddev->dev_sectors > rdev->sb_start + offset) /* data runs in to bitmap */ goto bad_alignment; } else if (rdev->sb_start < rdev->data_offset) { /* METADATA BITMAP DATA */ if (rdev->sb_start + offset + page->index*(PAGE_SIZE/512) + size/512 > rdev->data_offset) /* bitmap runs in to data */ goto bad_alignment; } else { /* DATA METADATA BITMAP - no problems */ } md_super_write(mddev, rdev, rdev->sb_start + offset + page->index * (PAGE_SIZE/512), size, page); } if (wait) md_super_wait(mddev); return 0; bad_alignment: return -EINVAL; } static void bitmap_file_kick(struct bitmap *bitmap); /* * write out a page to a file */ static void write_page(struct bitmap *bitmap, struct page *page, int wait) { struct buffer_head *bh; if (bitmap->file == NULL) { switch 
(write_sb_page(bitmap, page, wait)) { case -EINVAL: bitmap->flags |= BITMAP_WRITE_ERROR; } } else { bh = page_buffers(page); while (bh && bh->b_blocknr) { atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh); bh = bh->b_this_page; } if (wait) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); } if (bitmap->flags & BITMAP_WRITE_ERROR) bitmap_file_kick(bitmap); } static void end_bitmap_write(struct buffer_head *bh, int uptodate) { struct bitmap *bitmap = bh->b_private; unsigned long flags; if (!uptodate) { spin_lock_irqsave(&bitmap->lock, flags); bitmap->flags |= BITMAP_WRITE_ERROR; spin_unlock_irqrestore(&bitmap->lock, flags); } if (atomic_dec_and_test(&bitmap->pending_writes)) wake_up(&bitmap->write_wait); } /* copied from buffer.c */ static void __clear_page_buffers(struct page *page) { ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); } static void free_buffers(struct page *page) { struct buffer_head *bh = page_buffers(page); while (bh) { struct buffer_head *next = bh->b_this_page; free_buffer_head(bh); bh = next; } __clear_page_buffers(page); put_page(page); } /* read a page from a file. * We both read the page, and attach buffers to the page to record the * address of each block (using bmap). These addresses will be used * to write the block later, completely bypassing the filesystem. * This usage is similar to how swap files are handled, and allows us * to write to a file with no concerns of memory allocation failing. 
*/ static struct page *read_page(struct file *file, unsigned long index, struct bitmap *bitmap, unsigned long count) { struct page *page = NULL; struct inode *inode = file->f_path.dentry->d_inode; struct buffer_head *bh; sector_t block; PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT); page = alloc_page(GFP_KERNEL); if (!page) page = ERR_PTR(-ENOMEM); if (IS_ERR(page)) goto out; bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0); if (!bh) { put_page(page); page = ERR_PTR(-ENOMEM); goto out; } attach_page_buffers(page, bh); block = index << (PAGE_SHIFT - inode->i_blkbits); while (bh) { if (count == 0) bh->b_blocknr = 0; else { bh->b_blocknr = bmap(inode, block); if (bh->b_blocknr == 0) { /* Cannot use this file! */ free_buffers(page); page = ERR_PTR(-EINVAL); goto out; } bh->b_bdev = inode->i_sb->s_bdev; if (count < (1<<inode->i_blkbits)) count = 0; else count -= (1<<inode->i_blkbits); bh->b_end_io = end_bitmap_write; bh->b_private = bitmap; atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); submit_bh(READ, bh); } block++; bh = bh->b_this_page; } page->index = index; wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); if (bitmap->flags & BITMAP_WRITE_ERROR) { free_buffers(page); page = ERR_PTR(-EIO); } out: if (IS_ERR(page)) printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT, PTR_ERR(page)); return page; } /* * bitmap file superblock operations */ /* update the event counter and sync the superblock to disk */ void bitmap_update_sb(struct bitmap *bitmap) { bitmap_super_t *sb; unsigned long flags; if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ return; if (bitmap->mddev->bitmap_info.external) return; spin_lock_irqsave(&bitmap->lock, flags); if (!bitmap->sb_page) { /* no superblock */ spin_unlock_irqrestore(&bitmap->lock, flags); return; } spin_unlock_irqrestore(&bitmap->lock, 
flags); sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb->events = cpu_to_le64(bitmap->mddev->events); if (bitmap->mddev->events < bitmap->events_cleared) { /* rocking back to read-only */ bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->events_cleared); } /* Just in case these have been changed via sysfs: */ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); kunmap_atomic(sb, KM_USER0); write_page(bitmap, bitmap->sb_page, 1); } /* print out the bitmap file superblock */ void bitmap_print_sb(struct bitmap *bitmap) { bitmap_super_t *sb; if (!bitmap || !bitmap->sb_page) return; sb = kmap_atomic(bitmap->sb_page, KM_USER0); printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n", *(__u32 *)(sb->uuid+0), *(__u32 *)(sb->uuid+4), *(__u32 *)(sb->uuid+8), *(__u32 *)(sb->uuid+12)); printk(KERN_DEBUG " events: %llu\n", (unsigned long long) le64_to_cpu(sb->events)); printk(KERN_DEBUG "events cleared: %llu\n", (unsigned long long) le64_to_cpu(sb->events_cleared)); printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state)); printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize)); printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); printk(KERN_DEBUG " sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); kunmap_atomic(sb, KM_USER0); } /* read the superblock from the bitmap file and initialize some bitmap fields */ static int bitmap_read_sb(struct bitmap *bitmap) { char *reason = NULL; bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; unsigned long long events; int err = -EINVAL; /* page 0 is the superblock, 
read it... */ if (bitmap->file) { loff_t isize = i_size_read(bitmap->file->f_mapping->host); int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize; bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); } else { bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->mddev->bitmap_info.offset, NULL, 0, sizeof(bitmap_super_t)); } if (IS_ERR(bitmap->sb_page)) { err = PTR_ERR(bitmap->sb_page); bitmap->sb_page = NULL; return err; } sb = kmap_atomic(bitmap->sb_page, KM_USER0); chunksize = le32_to_cpu(sb->chunksize); daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) reason = "bad magic"; else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) reason = "unrecognized superblock version"; else if (chunksize < 512) reason = "bitmap chunksize too small"; else if ((1 << ffz(~chunksize)) != chunksize) reason = "bitmap chunksize not a power of 2"; else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) reason = "daemon sleep period out of range"; else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; if (reason) { printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n", bmname(bitmap), reason); goto out; } /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); if (!bitmap->mddev->persistent) goto success; /* * if we have a persistent array superblock, compare the * bitmap's UUID and event counter to the mddev's */ if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n", bmname(bitmap)); goto out; } events = le64_to_cpu(sb->events); if (events < bitmap->mddev->events) { printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) " "-- forcing full recovery\n", bmname(bitmap), events, (unsigned long long) 
bitmap->mddev->events); sb->state |= cpu_to_le32(BITMAP_STALE); } success: /* assign fields using values from superblock */ bitmap->mddev->bitmap_info.chunksize = chunksize; bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; bitmap->mddev->bitmap_info.max_write_behind = write_behind; bitmap->flags |= le32_to_cpu(sb->state); if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) bitmap->flags |= BITMAP_HOSTENDIAN; bitmap->events_cleared = le64_to_cpu(sb->events_cleared); if (sb->state & cpu_to_le32(BITMAP_STALE)) bitmap->events_cleared = bitmap->mddev->events; err = 0; out: kunmap_atomic(sb, KM_USER0); if (err) bitmap_print_sb(bitmap); return err; } enum bitmap_mask_op { MASK_SET, MASK_UNSET }; /* record the state of the bitmap in the superblock. Return the old value */ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, enum bitmap_mask_op op) { bitmap_super_t *sb; unsigned long flags; int old; spin_lock_irqsave(&bitmap->lock, flags); if (!bitmap->sb_page) { /* can't set the state */ spin_unlock_irqrestore(&bitmap->lock, flags); return 0; } spin_unlock_irqrestore(&bitmap->lock, flags); sb = kmap_atomic(bitmap->sb_page, KM_USER0); old = le32_to_cpu(sb->state) & bits; switch (op) { case MASK_SET: sb->state |= cpu_to_le32(bits); break; case MASK_UNSET: sb->state &= cpu_to_le32(~bits); break; default: BUG(); } kunmap_atomic(sb, KM_USER0); return old; } /* * general bitmap file operations */ /* * on-disk bitmap: * * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap * file a page at a time. There's a superblock at the start of the file. 
*/ /* calculate the index of the page that contains this bit */ static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk) { if (!bitmap->mddev->bitmap_info.external) chunk += sizeof(bitmap_super_t) << 3; return chunk >> PAGE_BIT_SHIFT; } /* calculate the (bit) offset of this bit within a page */ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk) { if (!bitmap->mddev->bitmap_info.external) chunk += sizeof(bitmap_super_t) << 3; return chunk & (PAGE_BITS - 1); } /* * return a pointer to the page in the filemap that contains the given bit * * this lookup is complicated by the fact that the bitmap sb might be exactly * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page * 0 or page 1 */ static inline struct page *filemap_get_page(struct bitmap *bitmap, unsigned long chunk) { if (bitmap->filemap == NULL) return NULL; if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL; return bitmap->filemap[file_page_index(bitmap, chunk) - file_page_index(bitmap, 0)]; } static void bitmap_file_unmap(struct bitmap *bitmap) { struct page **map, *sb_page; unsigned long *attr; int pages; unsigned long flags; spin_lock_irqsave(&bitmap->lock, flags); map = bitmap->filemap; bitmap->filemap = NULL; attr = bitmap->filemap_attr; bitmap->filemap_attr = NULL; pages = bitmap->file_pages; bitmap->file_pages = 0; sb_page = bitmap->sb_page; bitmap->sb_page = NULL; spin_unlock_irqrestore(&bitmap->lock, flags); while (pages--) if (map[pages] != sb_page) /* 0 is sb_page, release it below */ free_buffers(map[pages]); kfree(map); kfree(attr); if (sb_page) free_buffers(sb_page); } static void bitmap_file_put(struct bitmap *bitmap) { struct file *file; unsigned long flags; spin_lock_irqsave(&bitmap->lock, flags); file = bitmap->file; bitmap->file = NULL; spin_unlock_irqrestore(&bitmap->lock, flags); if (file) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); 
bitmap_file_unmap(bitmap); if (file) { struct inode *inode = file->f_path.dentry->d_inode; invalidate_mapping_pages(inode->i_mapping, 0, -1); fput(file); } } /* * bitmap_file_kick - if an error occurs while manipulating the bitmap file * then it is no longer reliable, so we stop using it and we mark the file * as failed in the superblock */ static void bitmap_file_kick(struct bitmap *bitmap) { char *path, *ptr = NULL; if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) { bitmap_update_sb(bitmap); if (bitmap->file) { path = kmalloc(PAGE_SIZE, GFP_KERNEL); if (path) ptr = d_path(&bitmap->file->f_path, path, PAGE_SIZE); printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n", bmname(bitmap), IS_ERR(ptr) ? "" : ptr); kfree(path); } else printk(KERN_ALERT "%s: disabling internal bitmap due to errors\n", bmname(bitmap)); } bitmap_file_put(bitmap); return; } enum bitmap_page_attr { BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */ BITMAP_PAGE_CLEAN = 1, /* there are bits that might need to be cleared */ BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */ }; static inline void set_page_attr(struct bitmap *bitmap, struct page *page, enum bitmap_page_attr attr) { if (page) __set_bit((page->index<<2) + attr, bitmap->filemap_attr); else __set_bit(attr, &bitmap->logattrs); } static inline void clear_page_attr(struct bitmap *bitmap, struct page *page, enum bitmap_page_attr attr) { if (page) __clear_bit((page->index<<2) + attr, bitmap->filemap_attr); else __clear_bit(attr, &bitmap->logattrs); } static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page, enum bitmap_page_attr attr) { if (page) return test_bit((page->index<<2) + attr, bitmap->filemap_attr); else return test_bit(attr, &bitmap->logattrs); } /* * bitmap_file_set_bit -- called before performing a write to the md device * to set (and eventually sync) a particular bit in the bitmap file * * we set the bit immediately, then we 
record the page number so that * when an unplug occurs, we can flush the dirty pages out to disk */ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; struct page *page = NULL; void *kaddr; unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap); if (!bitmap->filemap) { struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log; if (log) log->type->mark_region(log, chunk); } else { page = filemap_get_page(bitmap, chunk); if (!page) return; bit = file_page_offset(bitmap, chunk); /* set the bit */ kaddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) set_bit(bit, kaddr); else ext2_set_bit(bit, kaddr); kunmap_atomic(kaddr, KM_USER0); PRINTK("set file bit %lu page %lu\n", bit, page->index); } /* record page number so it gets flushed to disk when unplug occurs */ set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); } /* this gets called when the md device is ready to unplug its underlying * (slave) device queues -- before we let any writes go down, we need to * sync the dirty pages of the bitmap file to disk */ void bitmap_unplug(struct bitmap *bitmap) { unsigned long i, flags; int dirty, need_write; struct page *page; int wait = 0; if (!bitmap) return; if (!bitmap->filemap) { /* Must be using a dirty_log */ struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log; dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs); need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs); if (dirty || need_write) if (log->type->flush(log)) bitmap->flags |= BITMAP_WRITE_ERROR; goto out; } /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->file_pages; i++) { spin_lock_irqsave(&bitmap->lock, flags); if (!bitmap->filemap) { spin_unlock_irqrestore(&bitmap->lock, flags); return; } page = bitmap->filemap[i]; dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 
clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); if (dirty) wait = 1; spin_unlock_irqrestore(&bitmap->lock, flags); if (dirty || need_write) write_page(bitmap, page, 0); } if (wait) { /* if any writes were performed, we need to wait on them */ if (bitmap->file) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); else md_super_wait(bitmap->mddev); } out: if (bitmap->flags & BITMAP_WRITE_ERROR) bitmap_file_kick(bitmap); } EXPORT_SYMBOL(bitmap_unplug); static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); /* * bitmap_init_from_disk -- called at bitmap_create time to initialize * the in-memory bitmap from the on-disk bitmap -- also, sets up the * memory mapping of the bitmap file * Special cases: * if there's no bitmap file, or if the bitmap file had been * previously kicked from the array, we mark all the bits as * 1's in order to cause a full resync. * * We ignore all bits for sectors that end earlier than 'start'. * This is used when reading an out-of-date bitmap... 
*/ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) { unsigned long i, chunks, index, oldindex, bit; struct page *page = NULL, *oldpage = NULL; unsigned long num_pages, bit_cnt = 0; struct file *file; unsigned long bytes, offset; int outofdate; int ret = -ENOSPC; void *paddr; chunks = bitmap->chunks; file = bitmap->file; BUG_ON(!file && !bitmap->mddev->bitmap_info.offset); #ifdef INJECT_FAULTS_3 outofdate = 1; #else outofdate = bitmap->flags & BITMAP_STALE; #endif if (outofdate) printk(KERN_INFO "%s: bitmap file is out of date, doing full " "recovery\n", bmname(bitmap)); bytes = DIV_ROUND_UP(bitmap->chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); if (file && i_size_read(file->f_mapping->host) < bytes) { printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", bmname(bitmap), (unsigned long) i_size_read(file->f_mapping->host), bytes); goto err; } ret = -ENOMEM; bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); if (!bitmap->filemap) goto err; /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ bitmap->filemap_attr = kzalloc( roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), GFP_KERNEL); if (!bitmap->filemap_attr) goto err; oldindex = ~0L; for (i = 0; i < chunks; i++) { int b; index = file_page_index(bitmap, i); bit = file_page_offset(bitmap, i); if (index != oldindex) { /* this is a new page, read it in */ int count; /* unmap the old page, we're done with it */ if (index == num_pages-1) count = bytes - index * PAGE_SIZE; else count = PAGE_SIZE; if (index == 0 && bitmap->sb_page) { /* * if we're here then the superblock page * contains some bits (PAGE_SIZE != sizeof sb) * we've already read it in, so just use it */ page = bitmap->sb_page; offset = sizeof(bitmap_super_t); if (!file) page = read_sb_page( bitmap->mddev, bitmap->mddev->bitmap_info.offset, page, index, count); } else if (file) { page = 
read_page(file, index, bitmap, count); offset = 0; } else { page = read_sb_page(bitmap->mddev, bitmap->mddev->bitmap_info.offset, NULL, index, count); offset = 0; } if (IS_ERR(page)) { /* read error */ ret = PTR_ERR(page); goto err; } oldindex = index; oldpage = page; bitmap->filemap[bitmap->file_pages++] = page; bitmap->last_page_size = count; if (outofdate) { /* * if bitmap is out of date, dirty the * whole page and write it out */ paddr = kmap_atomic(page, KM_USER0); memset(paddr + offset, 0xff, PAGE_SIZE - offset); kunmap_atomic(paddr, KM_USER0); write_page(bitmap, page, 1); ret = -EIO; if (bitmap->flags & BITMAP_WRITE_ERROR) goto err; } } paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) b = test_bit(bit, paddr); else b = ext2_test_bit(bit, paddr); kunmap_atomic(paddr, KM_USER0); if (b) { /* if the disk bit is set, set the memory bit */ int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start); bitmap_set_memory_bits(bitmap, (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap), needed); bit_cnt++; set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); } } /* everything went OK */ ret = 0; bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); if (bit_cnt) { /* Kick recovery if any bits were set */ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); md_wakeup_thread(bitmap->mddev->thread); } printk(KERN_INFO "%s: bitmap initialized from disk: " "read %lu/%lu pages, set %lu bits\n", bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt); return 0; err: printk(KERN_INFO "%s: bitmap initialisation failed: %d\n", bmname(bitmap), ret); return ret; } void bitmap_write_all(struct bitmap *bitmap) { /* We don't actually write all bitmap blocks here, * just flag them as needing to be written */ int i; for (i = 0; i < bitmap->file_pages; i++) set_page_attr(bitmap, bitmap->filemap[i], BITMAP_PAGE_NEEDWRITE); } static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) { sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap); unsigned 
long page = chunk >> PAGE_COUNTER_SHIFT; bitmap->bp[page].count += inc; bitmap_checkfree(bitmap, page); } static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int create); /* * bitmap daemon -- periodically wakes up to clean bits and flush pages * out to disk */ void bitmap_daemon_work(mddev_t *mddev) { struct bitmap *bitmap; unsigned long j; unsigned long flags; struct page *page = NULL, *lastpage = NULL; sector_t blocks; void *paddr; struct dm_dirty_log *log = mddev->bitmap_info.log; /* Use a mutex to guard daemon_work against * bitmap_destroy. */ mutex_lock(&mddev->bitmap_info.mutex); bitmap = mddev->bitmap; if (bitmap == NULL) { mutex_unlock(&mddev->bitmap_info.mutex); return; } if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->mddev->bitmap_info.daemon_sleep)) goto done; bitmap->daemon_lastrun = jiffies; if (bitmap->allclean) { bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; goto done; } bitmap->allclean = 1; spin_lock_irqsave(&bitmap->lock, flags); for (j = 0; j < bitmap->chunks; j++) { bitmap_counter_t *bmc; if (!bitmap->filemap) { if (!log) /* error or shutdown */ break; } else page = filemap_get_page(bitmap, j); if (page != lastpage) { /* skip this page unless it's marked as needing cleaning */ if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) { int need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); if (need_write) clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); if (need_write) { write_page(bitmap, page, 0); bitmap->allclean = 0; } spin_lock_irqsave(&bitmap->lock, flags); j |= (PAGE_BITS - 1); continue; } /* grab the new page, sync and release the old */ if (lastpage != NULL) { if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); write_page(bitmap, lastpage, 0); } else { set_page_attr(bitmap, lastpage, 
BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); } } else spin_unlock_irqrestore(&bitmap->lock, flags); lastpage = page; /* We are possibly going to clear some bits, so make * sure that events_cleared is up-to-date. */ if (bitmap->need_sync && bitmap->mddev->bitmap_info.external == 0) { bitmap_super_t *sb; bitmap->need_sync = 0; sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb->events_cleared = cpu_to_le64(bitmap->events_cleared); kunmap_atomic(sb, KM_USER0); write_page(bitmap, bitmap->sb_page, 1); } spin_lock_irqsave(&bitmap->lock, flags); if (!bitmap->need_sync) clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); } bmc = bitmap_get_counter(bitmap, (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap), &blocks, 0); if (bmc) { if (*bmc) bitmap->allclean = 0; if (*bmc == 2) { *bmc = 1; /* maybe clear the bit next time */ set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); } else if (*bmc == 1 && !bitmap->need_sync) { /* we can clear the bit */ *bmc = 0; bitmap_count_page(bitmap, (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap), -1); /* clear the bit */ if (page) { paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) clear_bit(file_page_offset(bitmap, j), paddr); else ext2_clear_bit(file_page_offset(bitmap, j), paddr); kunmap_atomic(paddr, KM_USER0); } else log->type->clear_region(log, j); } } else j |= PAGE_COUNTER_MASK; } spin_unlock_irqrestore(&bitmap->lock, flags); /* now sync the final page */ if (lastpage != NULL || log != NULL) { spin_lock_irqsave(&bitmap->lock, flags); if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); if (lastpage) write_page(bitmap, lastpage, 0); else if (log->type->flush(log)) bitmap->flags |= BITMAP_WRITE_ERROR; } else { set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); } } done: if (bitmap->allclean == 0) bitmap->mddev->thread->timeout = 
bitmap->mddev->bitmap_info.daemon_sleep; mutex_unlock(&mddev->bitmap_info.mutex); } static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int create) __releases(bitmap->lock) __acquires(bitmap->lock) { /* If 'create', we might release the lock and reclaim it. * The lock must have been taken with interrupts enabled. * If !create, we don't release the lock. */ sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap); unsigned long page = chunk >> PAGE_COUNTER_SHIFT; unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT; sector_t csize; int err; err = bitmap_checkpage(bitmap, page, create); if (bitmap->bp[page].hijacked || bitmap->bp[page].map == NULL) csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1); else csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); *blocks = csize - (offset & (csize - 1)); if (err < 0) return NULL; /* now locked ... */ if (bitmap->bp[page].hijacked) { /* hijacked pointer */ /* should we use the first or second counter field * of the hijacked pointer? 
*/ int hi = (pageoff > PAGE_COUNTER_MASK); return &((bitmap_counter_t *) &bitmap->bp[page].map)[hi]; } else /* page is allocated */ return (bitmap_counter_t *) &(bitmap->bp[page].map[pageoff]); } int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) { if (!bitmap) return 0; if (behind) { int bw; atomic_inc(&bitmap->behind_writes); bw = atomic_read(&bitmap->behind_writes); if (bw > bitmap->behind_writes_used) bitmap->behind_writes_used = bw; PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n", bw, bitmap->max_write_behind); } while (sectors) { sector_t blocks; bitmap_counter_t *bmc; spin_lock_irq(&bitmap->lock); bmc = bitmap_get_counter(bitmap, offset, &blocks, 1); if (!bmc) { spin_unlock_irq(&bitmap->lock); return 0; } if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) { DEFINE_WAIT(__wait); /* note that it is safe to do the prepare_to_wait * after the test as long as we do it before dropping * the spinlock. */ prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->lock); md_unplug(bitmap->mddev); schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } switch (*bmc) { case 0: bitmap_file_set_bit(bitmap, offset); bitmap_count_page(bitmap, offset, 1); /* fall through */ case 1: *bmc = 2; } (*bmc)++; spin_unlock_irq(&bitmap->lock); offset += blocks; if (sectors > blocks) sectors -= blocks; else sectors = 0; } bitmap->allclean = 0; return 0; } EXPORT_SYMBOL(bitmap_startwrite); void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int success, int behind) { if (!bitmap) return; if (behind) { if (atomic_dec_and_test(&bitmap->behind_writes)) wake_up(&bitmap->behind_wait); PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n", atomic_read(&bitmap->behind_writes), bitmap->max_write_behind); } if (bitmap->mddev->degraded) /* Never clear bits or update events_cleared when degraded */ success = 0; while (sectors) { sector_t blocks; unsigned long flags; 
bitmap_counter_t *bmc; spin_lock_irqsave(&bitmap->lock, flags); bmc = bitmap_get_counter(bitmap, offset, &blocks, 0); if (!bmc) { spin_unlock_irqrestore(&bitmap->lock, flags); return; } if (success && bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); } if (!success && ! (*bmc & NEEDED_MASK)) *bmc |= NEEDED_MASK; if ((*bmc & COUNTER_MAX) == COUNTER_MAX) wake_up(&bitmap->overflow_wait); (*bmc)--; if (*bmc <= 2) set_page_attr(bitmap, filemap_get_page( bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), BITMAP_PAGE_CLEAN); spin_unlock_irqrestore(&bitmap->lock, flags); offset += blocks; if (sectors > blocks) sectors -= blocks; else sectors = 0; } } EXPORT_SYMBOL(bitmap_endwrite); static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded) { bitmap_counter_t *bmc; int rv; if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ *blocks = 1024; return 1; /* always resync if no bitmap */ } spin_lock_irq(&bitmap->lock); bmc = bitmap_get_counter(bitmap, offset, blocks, 0); rv = 0; if (bmc) { /* locked */ if (RESYNC(*bmc)) rv = 1; else if (NEEDED(*bmc)) { rv = 1; if (!degraded) { /* don't set/clear bits if degraded */ *bmc |= RESYNC_MASK; *bmc &= ~NEEDED_MASK; } } } spin_unlock_irq(&bitmap->lock); bitmap->allclean = 0; return rv; } int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded) { /* bitmap_start_sync must always report on multiples of whole * pages, otherwise resync (which is very PAGE_SIZE based) will * get confused. * So call __bitmap_start_sync repeatedly (if needed) until * At least PAGE_SIZE>>9 blocks are covered. * Return the 'or' of the result. 
*/ int rv = 0; sector_t blocks1; *blocks = 0; while (*blocks < (PAGE_SIZE>>9)) { rv |= __bitmap_start_sync(bitmap, offset, &blocks1, degraded); offset += blocks1; *blocks += blocks1; } return rv; } EXPORT_SYMBOL(bitmap_start_sync); void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) { bitmap_counter_t *bmc; unsigned long flags; if (bitmap == NULL) { *blocks = 1024; return; } spin_lock_irqsave(&bitmap->lock, flags); bmc = bitmap_get_counter(bitmap, offset, blocks, 0); if (bmc == NULL) goto unlock; /* locked */ if (RESYNC(*bmc)) { *bmc &= ~RESYNC_MASK; if (!NEEDED(*bmc) && aborted) *bmc |= NEEDED_MASK; else { if (*bmc <= 2) set_page_attr(bitmap, filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), BITMAP_PAGE_CLEAN); } } unlock: spin_unlock_irqrestore(&bitmap->lock, flags); bitmap->allclean = 0; } EXPORT_SYMBOL(bitmap_end_sync); void bitmap_close_sync(struct bitmap *bitmap) { /* Sync has finished, and any bitmap chunks that weren't synced * properly have been aborted. 
It remains to us to clear the * RESYNC bit wherever it is still on */ sector_t sector = 0; sector_t blocks; if (!bitmap) return; while (sector < bitmap->mddev->resync_max_sectors) { bitmap_end_sync(bitmap, sector, &blocks, 0); sector += blocks; } } EXPORT_SYMBOL(bitmap_close_sync); void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) { sector_t s = 0; sector_t blocks; if (!bitmap) return; if (sector == 0) { bitmap->last_end_sync = jiffies; return; } if (time_before(jiffies, (bitmap->last_end_sync + bitmap->mddev->bitmap_info.daemon_sleep))) return; wait_event(bitmap->mddev->recovery_wait, atomic_read(&bitmap->mddev->recovery_active) == 0); bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { bitmap_end_sync(bitmap, s, &blocks, 0); s += blocks; } bitmap->last_end_sync = jiffies; sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); } EXPORT_SYMBOL(bitmap_cond_end_sync); static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) { /* For each chunk covered by any of these sectors, set the * counter to 1 and set resync_needed. They should all * be 0 at this point */ sector_t secs; bitmap_counter_t *bmc; spin_lock_irq(&bitmap->lock); bmc = bitmap_get_counter(bitmap, offset, &secs, 1); if (!bmc) { spin_unlock_irq(&bitmap->lock); return; } if (!*bmc) { struct page *page; *bmc = 1 | (needed ? 
NEEDED_MASK : 0); bitmap_count_page(bitmap, offset, 1); page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)); set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); } spin_unlock_irq(&bitmap->lock); bitmap->allclean = 0; } /* dirty the memory and file bits for bitmap chunks "s" to "e" */ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) { unsigned long chunk; for (chunk = s; chunk <= e; chunk++) { sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); bitmap_set_memory_bits(bitmap, sec, 1); bitmap_file_set_bit(bitmap, sec); if (sec < bitmap->mddev->recovery_cp) /* We are asserting that the array is dirty, * so move the recovery_cp address back so * that it is obvious that it is dirty */ bitmap->mddev->recovery_cp = sec; } } /* * flush out any pending updates */ void bitmap_flush(mddev_t *mddev) { struct bitmap *bitmap = mddev->bitmap; long sleep; if (!bitmap) /* there was no bitmap */ return; /* run the daemon_work three time to ensure everything is flushed * that can be */ sleep = mddev->bitmap_info.daemon_sleep * 2; bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap_update_sb(bitmap); } /* * free memory that was allocated */ static void bitmap_free(struct bitmap *bitmap) { unsigned long k, pages; struct bitmap_page *bp; if (!bitmap) /* there was no bitmap */ return; /* release the bitmap file and kill the daemon */ bitmap_file_put(bitmap); bp = bitmap->bp; pages = bitmap->pages; /* free all allocated memory */ if (bp) /* deallocate the page memory */ for (k = 0; k < pages; k++) if (bp[k].map && !bp[k].hijacked) kfree(bp[k].map); kfree(bp); kfree(bitmap); } void bitmap_destroy(mddev_t *mddev) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) /* there was no bitmap */ return; mutex_lock(&mddev->bitmap_info.mutex); mddev->bitmap = NULL; /* disconnect from the md device */ 
mutex_unlock(&mddev->bitmap_info.mutex); if (mddev->thread) mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; if (bitmap->sysfs_can_clear) sysfs_put(bitmap->sysfs_can_clear); bitmap_free(bitmap); } /* * initialize the bitmap structure * if this returns an error, bitmap_destroy must be called to do clean up */ int bitmap_create(mddev_t *mddev) { struct bitmap *bitmap; sector_t blocks = mddev->resync_max_sectors; unsigned long chunks; unsigned long pages; struct file *file = mddev->bitmap_info.file; int err; struct sysfs_dirent *bm = NULL; BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); if (!file && !mddev->bitmap_info.offset && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */ return 0; BUG_ON(file && mddev->bitmap_info.offset); BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log); bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return -ENOMEM; spin_lock_init(&bitmap->lock); atomic_set(&bitmap->pending_writes, 0); init_waitqueue_head(&bitmap->write_wait); init_waitqueue_head(&bitmap->overflow_wait); init_waitqueue_head(&bitmap->behind_wait); bitmap->mddev = mddev; if (mddev->kobj.sd) bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap"); if (bm) { bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear"); sysfs_put(bm); } else bitmap->sysfs_can_clear = NULL; bitmap->file = file; if (file) { get_file(file); /* As future accesses to this file will use bmap, * and bypass the page cache, we must sync the file * first. */ vfs_fsync(file, 1); } /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ if (!mddev->bitmap_info.external) err = bitmap_read_sb(bitmap); else { err = 0; if (mddev->bitmap_info.chunksize == 0 || mddev->bitmap_info.daemon_sleep == 0) /* chunksize and time_base need to be * set first. 
*/ err = -EINVAL; } if (err) goto error; bitmap->daemon_lastrun = jiffies; bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize); /* now that chunksize and chunkshift are set, we can use these macros */ chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >> CHUNK_BLOCK_SHIFT(bitmap); pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; BUG_ON(!pages); bitmap->chunks = chunks; bitmap->pages = pages; bitmap->missing_pages = pages; bitmap->counter_bits = COUNTER_BITS; bitmap->syncchunk = ~0UL; #ifdef INJECT_FATAL_FAULT_1 bitmap->bp = NULL; #else bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); #endif err = -ENOMEM; if (!bitmap->bp) goto error; printk(KERN_INFO "created bitmap (%lu pages) for device %s\n", pages, bmname(bitmap)); mddev->bitmap = bitmap; return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0; error: bitmap_free(bitmap); return err; } int bitmap_load(mddev_t *mddev) { int err = 0; sector_t sector = 0; struct bitmap *bitmap = mddev->bitmap; if (!bitmap) goto out; /* Clear out old bitmap info first: Either there is none, or we * are resuming after someone else has possibly changed things, * so we should forget old cached info. * All chunks should be clean, but some might need_sync. 
*/ while (sector < mddev->resync_max_sectors) { sector_t blocks; bitmap_start_sync(bitmap, sector, &blocks, 0); sector += blocks; } bitmap_close_sync(bitmap); if (mddev->bitmap_info.log) { unsigned long i; struct dm_dirty_log *log = mddev->bitmap_info.log; for (i = 0; i < bitmap->chunks; i++) if (!log->type->in_sync(log, i, 1)) bitmap_set_memory_bits(bitmap, (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap), 1); } else { sector_t start = 0; if (mddev->degraded == 0 || bitmap->events_cleared == mddev->events) /* no need to keep dirty bits to optimise a * re-add of a missing device */ start = mddev->recovery_cp; err = bitmap_init_from_disk(bitmap, start); } if (err) goto out; mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; md_wakeup_thread(mddev->thread); bitmap_update_sb(bitmap); if (bitmap->flags & BITMAP_WRITE_ERROR) err = -EIO; out: return err; } EXPORT_SYMBOL_GPL(bitmap_load); static ssize_t location_show(mddev_t *mddev, char *page) { ssize_t len; if (mddev->bitmap_info.file) len = sprintf(page, "file"); else if (mddev->bitmap_info.offset) len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); else len = sprintf(page, "none"); len += sprintf(page+len, "\n"); return len; } static ssize_t location_store(mddev_t *mddev, const char *buf, size_t len) { if (mddev->pers) { if (!mddev->pers->quiesce) return -EBUSY; if (mddev->recovery || mddev->sync_thread) return -EBUSY; } if (mddev->bitmap || mddev->bitmap_info.file || mddev->bitmap_info.offset) { /* bitmap already configured. 
Only option is to clear it */ if (strncmp(buf, "none", 4) != 0) return -EBUSY; if (mddev->pers) { mddev->pers->quiesce(mddev, 1); bitmap_destroy(mddev); mddev->pers->quiesce(mddev, 0); } mddev->bitmap_info.offset = 0; if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; mddev->bitmap_info.file = NULL; restore_bitmap_write_access(f); fput(f); } } else { /* No bitmap, OK to set a location */ long long offset; if (strncmp(buf, "none", 4) == 0) /* nothing to be done */; else if (strncmp(buf, "file:", 5) == 0) { /* Not supported yet */ return -EINVAL; } else { int rv; if (buf[0] == '+') rv = strict_strtoll(buf+1, 10, &offset); else rv = strict_strtoll(buf, 10, &offset); if (rv) return rv; if (offset == 0) return -EINVAL; if (mddev->bitmap_info.external == 0 && mddev->major_version == 0 && offset != mddev->bitmap_info.default_offset) return -EINVAL; mddev->bitmap_info.offset = offset; if (mddev->pers) { mddev->pers->quiesce(mddev, 1); rv = bitmap_create(mddev); if (rv) { bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; } mddev->pers->quiesce(mddev, 0); if (rv) return rv; } } } if (!mddev->external) { /* Ensure new bitmap info is stored in * metadata promptly. */ set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); } return len; } static struct md_sysfs_entry bitmap_location = __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store); static ssize_t timeout_show(mddev_t *mddev, char *page) { ssize_t len; unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; len = sprintf(page, "%lu", secs); if (jifs) len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); len += sprintf(page+len, "\n"); return len; } static ssize_t timeout_store(mddev_t *mddev, const char *buf, size_t len) { /* timeout can be set at any time */ unsigned long timeout; int rv = strict_strtoul_scaled(buf, &timeout, 4); if (rv) return rv; /* just to make sure we don't overflow... 
*/ if (timeout >= LONG_MAX / HZ) return -EINVAL; timeout = timeout * HZ / 10000; if (timeout >= MAX_SCHEDULE_TIMEOUT) timeout = MAX_SCHEDULE_TIMEOUT-1; if (timeout < 1) timeout = 1; mddev->bitmap_info.daemon_sleep = timeout; if (mddev->thread) { /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then * the bitmap is all clean and we don't need to * adjust the timeout right now */ if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) { mddev->thread->timeout = timeout; md_wakeup_thread(mddev->thread); } } return len; } static struct md_sysfs_entry bitmap_timeout = __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store); static ssize_t backlog_show(mddev_t *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind); } static ssize_t backlog_store(mddev_t *mddev, const char *buf, size_t len) { unsigned long backlog; int rv = strict_strtoul(buf, 10, &backlog); if (rv) return rv; if (backlog > COUNTER_MAX) return -EINVAL; mddev->bitmap_info.max_write_behind = backlog; return len; } static struct md_sysfs_entry bitmap_backlog = __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store); static ssize_t chunksize_show(mddev_t *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize); } static ssize_t chunksize_store(mddev_t *mddev, const char *buf, size_t len) { /* Can only be changed when no bitmap is active */ int rv; unsigned long csize; if (mddev->bitmap) return -EBUSY; rv = strict_strtoul(buf, 10, &csize); if (rv) return rv; if (csize < 512 || !is_power_of_2(csize)) return -EINVAL; mddev->bitmap_info.chunksize = csize; return len; } static struct md_sysfs_entry bitmap_chunksize = __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store); static ssize_t metadata_show(mddev_t *mddev, char *page) { return sprintf(page, "%s\n", (mddev->bitmap_info.external ? 
"external" : "internal")); } static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len) { if (mddev->bitmap || mddev->bitmap_info.file || mddev->bitmap_info.offset) return -EBUSY; if (strncmp(buf, "external", 8) == 0) mddev->bitmap_info.external = 1; else if (strncmp(buf, "internal", 8) == 0) mddev->bitmap_info.external = 0; else return -EINVAL; return len; } static struct md_sysfs_entry bitmap_metadata = __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store); static ssize_t can_clear_show(mddev_t *mddev, char *page) { int len; if (mddev->bitmap) len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ? "false" : "true")); else len = sprintf(page, "\n"); return len; } static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len) { if (mddev->bitmap == NULL) return -ENOENT; if (strncmp(buf, "false", 5) == 0) mddev->bitmap->need_sync = 1; else if (strncmp(buf, "true", 4) == 0) { if (mddev->degraded) return -EBUSY; mddev->bitmap->need_sync = 0; } else return -EINVAL; return len; } static struct md_sysfs_entry bitmap_can_clear = __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store); static ssize_t behind_writes_used_show(mddev_t *mddev, char *page) { if (mddev->bitmap == NULL) return sprintf(page, "0\n"); return sprintf(page, "%lu\n", mddev->bitmap->behind_writes_used); } static ssize_t behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len) { if (mddev->bitmap) mddev->bitmap->behind_writes_used = 0; return len; } static struct md_sysfs_entry max_backlog_used = __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, behind_writes_used_show, behind_writes_used_reset); static struct attribute *md_bitmap_attrs[] = { &bitmap_location.attr, &bitmap_timeout.attr, &bitmap_backlog.attr, &bitmap_chunksize.attr, &bitmap_metadata.attr, &bitmap_can_clear.attr, &max_backlog_used.attr, NULL }; struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, };
gpl-2.0
01org/KVMGT-qemu
target-microblaze/cpu.c
51
5121
/* * QEMU MicroBlaze CPU * * Copyright (c) 2009 Edgar E. Iglesias * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd. * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #include "cpu.h" #include "qemu-common.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" static void mb_cpu_set_pc(CPUState *cs, vaddr value) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); cpu->env.sregs[SR_PC] = value; } /* CPUClass::reset() */ static void mb_cpu_reset(CPUState *s) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(s); MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(cpu); CPUMBState *env = &cpu->env; mcc->parent_reset(s); memset(env, 0, offsetof(CPUMBState, breakpoints)); env->res_addr = RES_ADDR_NONE; tlb_flush(env, 1); /* Disable stack protector. 
*/ env->shr = ~0; env->pvr.regs[0] = PVR0_PVR_FULL_MASK \ | PVR0_USE_BARREL_MASK \ | PVR0_USE_DIV_MASK \ | PVR0_USE_HW_MUL_MASK \ | PVR0_USE_EXC_MASK \ | PVR0_USE_ICACHE_MASK \ | PVR0_USE_DCACHE_MASK \ | PVR0_USE_MMU \ | (0xb << 8); env->pvr.regs[2] = PVR2_D_OPB_MASK \ | PVR2_D_LMB_MASK \ | PVR2_I_OPB_MASK \ | PVR2_I_LMB_MASK \ | PVR2_USE_MSR_INSTR \ | PVR2_USE_PCMP_INSTR \ | PVR2_USE_BARREL_MASK \ | PVR2_USE_DIV_MASK \ | PVR2_USE_HW_MUL_MASK \ | PVR2_USE_MUL64_MASK \ | PVR2_USE_FPU_MASK \ | PVR2_USE_FPU2_MASK \ | PVR2_FPU_EXC_MASK \ | 0; env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */ env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17); #if defined(CONFIG_USER_ONLY) /* start in user mode with interrupts enabled. */ env->sregs[SR_MSR] = MSR_EE | MSR_IE | MSR_VM | MSR_UM; env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp. */ #else env->sregs[SR_MSR] = 0; mmu_init(&env->mmu); env->mmu.c_mmu = 3; env->mmu.c_mmu_tlb_access = 3; env->mmu.c_mmu_zones = 16; #endif } static void mb_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(dev); cpu_reset(cs); qemu_init_vcpu(cs); mcc->parent_realize(dev, errp); } static void mb_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj); CPUMBState *env = &cpu->env; static bool tcg_initialized; cs->env_ptr = env; cpu_exec_init(env); set_float_rounding_mode(float_round_nearest_even, &env->fp_status); if (tcg_enabled() && !tcg_initialized) { tcg_initialized = true; mb_tcg_init(); } } static const VMStateDescription vmstate_mb_cpu = { .name = "cpu", .unmigratable = 1, }; static Property mb_properties[] = { DEFINE_PROP_UINT32("xlnx.base-vectors", MicroBlazeCPU, base_vectors, 0), DEFINE_PROP_END_OF_LIST(), }; static void mb_cpu_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_CLASS(oc); mcc->parent_realize = 
dc->realize; dc->realize = mb_cpu_realizefn; mcc->parent_reset = cc->reset; cc->reset = mb_cpu_reset; cc->do_interrupt = mb_cpu_do_interrupt; cc->dump_state = mb_cpu_dump_state; cc->set_pc = mb_cpu_set_pc; cc->gdb_read_register = mb_cpu_gdb_read_register; cc->gdb_write_register = mb_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY cc->do_unassigned_access = mb_cpu_unassigned_access; cc->get_phys_page_debug = mb_cpu_get_phys_page_debug; #endif dc->vmsd = &vmstate_mb_cpu; dc->props = mb_properties; cc->gdb_num_core_regs = 32 + 5; } static const TypeInfo mb_cpu_type_info = { .name = TYPE_MICROBLAZE_CPU, .parent = TYPE_CPU, .instance_size = sizeof(MicroBlazeCPU), .instance_init = mb_cpu_initfn, .class_size = sizeof(MicroBlazeCPUClass), .class_init = mb_cpu_class_init, }; static void mb_cpu_register_types(void) { type_register_static(&mb_cpu_type_info); } type_init(mb_cpu_register_types)
gpl-2.0
CYB0RG97/kernel_nvidia_s8515
drivers/hwmon/applesmc.c
307
32501
/* * drivers/hwmon/applesmc.c - driver for Apple's SMC (accelerometer, temperature * sensors, fan control, keyboard backlight control) used in Intel-based Apple * computers. * * Copyright (C) 2007 Nicolas Boichat <nicolas@boichat.ch> * Copyright (C) 2010 Henrik Rydberg <rydberg@euromail.se> * * Based on hdaps.c driver: * Copyright (C) 2005 Robert Love <rml@novell.com> * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> * * Fan control based on smcFanControl: * Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License v2 as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input-polldev.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/dmi.h> #include <linux/mutex.h> #include <linux/hwmon-sysfs.h> #include <linux/io.h> #include <linux/leds.h> #include <linux/hwmon.h> #include <linux/workqueue.h> /* data port used by Apple SMC */ #define APPLESMC_DATA_PORT 0x300 /* command/status port used by Apple SMC */ #define APPLESMC_CMD_PORT 0x304 #define APPLESMC_NR_PORTS 32 /* 0x300-0x31f */ #define APPLESMC_MAX_DATA_LENGTH 32 /* wait up to 32 ms for a status change. 
*/ #define APPLESMC_MIN_WAIT 0x0040 #define APPLESMC_MAX_WAIT 0x8000 #define APPLESMC_STATUS_MASK 0x0f #define APPLESMC_READ_CMD 0x10 #define APPLESMC_WRITE_CMD 0x11 #define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12 #define APPLESMC_GET_KEY_TYPE_CMD 0x13 #define KEY_COUNT_KEY "#KEY" /* r-o ui32 */ #define LIGHT_SENSOR_LEFT_KEY "ALV0" /* r-o {alv (6-10 bytes) */ #define LIGHT_SENSOR_RIGHT_KEY "ALV1" /* r-o {alv (6-10 bytes) */ #define BACKLIGHT_KEY "LKSB" /* w-o {lkb (2 bytes) */ #define CLAMSHELL_KEY "MSLD" /* r-o ui8 (unused) */ #define MOTION_SENSOR_X_KEY "MO_X" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_Y_KEY "MO_Y" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_Z_KEY "MO_Z" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_KEY "MOCN" /* r/w ui16 */ #define FANS_COUNT "FNum" /* r-o ui8 */ #define FANS_MANUAL "FS! " /* r-w ui16 */ #define FAN_ID_FMT "F%dID" /* r-o char[16] */ /* List of keys used to read/write fan speeds */ static const char *const fan_speed_fmt[] = { "F%dAc", /* actual speed */ "F%dMn", /* minimum speed (rw) */ "F%dMx", /* maximum speed */ "F%dSf", /* safe speed - not all models */ "F%dTg", /* target speed (manual: rw) */ }; #define INIT_TIMEOUT_MSECS 5000 /* wait up to 5s for device init ... */ #define INIT_WAIT_MSECS 50 /* ... 
in 50ms increments */ #define APPLESMC_POLL_INTERVAL 50 /* msecs */ #define APPLESMC_INPUT_FUZZ 4 /* input event threshold */ #define APPLESMC_INPUT_FLAT 4 #define SENSOR_X 0 #define SENSOR_Y 1 #define SENSOR_Z 2 #define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff) #define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16) /* Dynamic device node attributes */ struct applesmc_dev_attr { struct sensor_device_attribute sda; /* hwmon attributes */ char name[32]; /* room for node file name */ }; /* Dynamic device node group */ struct applesmc_node_group { char *format; /* format string */ void *show; /* show function */ void *store; /* store function */ int option; /* function argument */ struct applesmc_dev_attr *nodes; /* dynamic node array */ }; /* AppleSMC entry - cached register information */ struct applesmc_entry { char key[5]; /* four-letter key code */ u8 valid; /* set when entry is successfully read once */ u8 len; /* bounded by APPLESMC_MAX_DATA_LENGTH */ char type[5]; /* four-letter type code */ u8 flags; /* 0x10: func; 0x40: write; 0x80: read */ }; /* Register lookup and registers common to all SMCs */ static struct applesmc_registers { struct mutex mutex; /* register read/write mutex */ unsigned int key_count; /* number of SMC registers */ unsigned int fan_count; /* number of fans */ unsigned int temp_count; /* number of temperature registers */ unsigned int temp_begin; /* temperature lower index bound */ unsigned int temp_end; /* temperature upper index bound */ int num_light_sensors; /* number of light sensors */ bool has_accelerometer; /* has motion sensor */ bool has_key_backlight; /* has keyboard backlight */ bool init_complete; /* true when fully initialized */ struct applesmc_entry *cache; /* cached key entries */ } smcreg = { .mutex = __MUTEX_INITIALIZER(smcreg.mutex), }; static const int debug; static struct platform_device *pdev; static s16 rest_x; static s16 rest_y; static u8 backlight_state[2]; static struct device *hwmon_dev; 
static struct input_polled_dev *applesmc_idev; /* * Last index written to key_at_index sysfs file, and value to use for all other * key_at_index_* sysfs files. */ static unsigned int key_at_index; static struct workqueue_struct *applesmc_led_wq; /* * __wait_status - Wait up to 32ms for the status port to get a certain value * (masked with 0x0f), returning zero if the value is obtained. Callers must * hold applesmc_lock. */ static int __wait_status(u8 val) { int us; val = val & APPLESMC_STATUS_MASK; for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { udelay(us); if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val) return 0; } return -EIO; } /* * special treatment of command port - on newer macbooks, it seems necessary * to resend the command byte before polling the status again. Callers must * hold applesmc_lock. */ static int send_command(u8 cmd) { int us; for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { outb(cmd, APPLESMC_CMD_PORT); udelay(us); if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c) return 0; } return -EIO; } static int send_argument(const char *key) { int i; for (i = 0; i < 4; i++) { outb(key[i], APPLESMC_DATA_PORT); if (__wait_status(0x04)) return -EIO; } return 0; } static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) { int i; if (send_command(cmd) || send_argument(key)) { pr_warn("%.4s: read arg fail\n", key); return -EIO; } outb(len, APPLESMC_DATA_PORT); for (i = 0; i < len; i++) { if (__wait_status(0x05)) { pr_warn("%.4s: read data fail\n", key); return -EIO; } buffer[i] = inb(APPLESMC_DATA_PORT); } return 0; } static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) { int i; if (send_command(cmd) || send_argument(key)) { pr_warn("%s: write arg fail\n", key); return -EIO; } outb(len, APPLESMC_DATA_PORT); for (i = 0; i < len; i++) { if (__wait_status(0x04)) { pr_warn("%s: write data fail\n", key); return -EIO; } outb(buffer[i], APPLESMC_DATA_PORT); } return 0; } static int 
read_register_count(unsigned int *count) { __be32 be; int ret; ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); if (ret) return ret; *count = be32_to_cpu(be); return 0; } /* * Serialized I/O * * Returns zero on success or a negative error on failure. * All functions below are concurrency safe - callers should NOT hold lock. */ static int applesmc_read_entry(const struct applesmc_entry *entry, u8 *buf, u8 len) { int ret; if (entry->len != len) return -EINVAL; mutex_lock(&smcreg.mutex); ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len); mutex_unlock(&smcreg.mutex); return ret; } static int applesmc_write_entry(const struct applesmc_entry *entry, const u8 *buf, u8 len) { int ret; if (entry->len != len) return -EINVAL; mutex_lock(&smcreg.mutex); ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len); mutex_unlock(&smcreg.mutex); return ret; } static const struct applesmc_entry *applesmc_get_entry_by_index(int index) { struct applesmc_entry *cache = &smcreg.cache[index]; u8 key[4], info[6]; __be32 be; int ret = 0; if (cache->valid) return cache; mutex_lock(&smcreg.mutex); if (cache->valid) goto out; be = cpu_to_be32(index); ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); if (ret) goto out; ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); if (ret) goto out; memcpy(cache->key, key, 4); cache->len = info[0]; memcpy(cache->type, &info[1], 4); cache->flags = info[5]; cache->valid = 1; out: mutex_unlock(&smcreg.mutex); if (ret) return ERR_PTR(ret); return cache; } static int applesmc_get_lower_bound(unsigned int *lo, const char *key) { int begin = 0, end = smcreg.key_count; const struct applesmc_entry *entry; while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); if (IS_ERR(entry)) { *lo = 0; return PTR_ERR(entry); } if (strcmp(entry->key, key) < 0) begin = middle + 1; else end = middle; } *lo = begin; return 0; } static int applesmc_get_upper_bound(unsigned int *hi, const 
char *key) { int begin = 0, end = smcreg.key_count; const struct applesmc_entry *entry; while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); if (IS_ERR(entry)) { *hi = smcreg.key_count; return PTR_ERR(entry); } if (strcmp(key, entry->key) < 0) end = middle; else begin = middle + 1; } *hi = begin; return 0; } static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key) { int begin, end; int ret; ret = applesmc_get_lower_bound(&begin, key); if (ret) return ERR_PTR(ret); ret = applesmc_get_upper_bound(&end, key); if (ret) return ERR_PTR(ret); if (end - begin != 1) return ERR_PTR(-EINVAL); return applesmc_get_entry_by_index(begin); } static int applesmc_read_key(const char *key, u8 *buffer, u8 len) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry)) return PTR_ERR(entry); return applesmc_read_entry(entry, buffer, len); } static int applesmc_write_key(const char *key, const u8 *buffer, u8 len) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry)) return PTR_ERR(entry); return applesmc_write_entry(entry, buffer, len); } static int applesmc_has_key(const char *key, bool *value) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL) return PTR_ERR(entry); *value = !IS_ERR(entry); return 0; } /* * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z). */ static int applesmc_read_motion_sensor(int index, s16 *value) { u8 buffer[2]; int ret; switch (index) { case SENSOR_X: ret = applesmc_read_key(MOTION_SENSOR_X_KEY, buffer, 2); break; case SENSOR_Y: ret = applesmc_read_key(MOTION_SENSOR_Y_KEY, buffer, 2); break; case SENSOR_Z: ret = applesmc_read_key(MOTION_SENSOR_Z_KEY, buffer, 2); break; default: ret = -EINVAL; } *value = ((s16)buffer[0] << 8) | buffer[1]; return ret; } /* * applesmc_device_init - initialize the accelerometer. Can sleep. 
*/ static void applesmc_device_init(void) { int total; u8 buffer[2]; if (!smcreg.has_accelerometer) return; for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) { if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) && (buffer[0] != 0x00 || buffer[1] != 0x00)) return; buffer[0] = 0xe0; buffer[1] = 0x00; applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2); msleep(INIT_WAIT_MSECS); } pr_warn("failed to init the device\n"); } /* * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent. */ static int applesmc_init_smcreg_try(void) { struct applesmc_registers *s = &smcreg; bool left_light_sensor, right_light_sensor; unsigned int count; u8 tmp[1]; int ret; if (s->init_complete) return 0; ret = read_register_count(&count); if (ret) return ret; if (s->cache && s->key_count != count) { pr_warn("key count changed from %d to %d\n", s->key_count, count); kfree(s->cache); s->cache = NULL; } s->key_count = count; if (!s->cache) s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); if (!s->cache) return -ENOMEM; ret = applesmc_read_key(FANS_COUNT, tmp, 1); if (ret) return ret; s->fan_count = tmp[0]; ret = applesmc_get_lower_bound(&s->temp_begin, "T"); if (ret) return ret; ret = applesmc_get_lower_bound(&s->temp_end, "U"); if (ret) return ret; s->temp_count = s->temp_end - s->temp_begin; ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); if (ret) return ret; ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); if (ret) return ret; ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer); if (ret) return ret; ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight); if (ret) return ret; s->num_light_sensors = left_light_sensor + right_light_sensor; s->init_complete = true; pr_info("key=%d fan=%d temp=%d acc=%d lux=%d kbd=%d\n", s->key_count, s->fan_count, s->temp_count, s->has_accelerometer, s->num_light_sensors, s->has_key_backlight); return 0; } /* * applesmc_init_smcreg - Initialize register 
cache. * * Retries until initialization is successful, or the operation times out. * */ static int applesmc_init_smcreg(void) { int ms, ret; for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) { ret = applesmc_init_smcreg_try(); if (!ret) { if (ms) pr_info("init_smcreg() took %d ms\n", ms); return 0; } msleep(INIT_WAIT_MSECS); } kfree(smcreg.cache); smcreg.cache = NULL; return ret; } static void applesmc_destroy_smcreg(void) { kfree(smcreg.cache); smcreg.cache = NULL; smcreg.init_complete = false; } /* Device model stuff */ static int applesmc_probe(struct platform_device *dev) { int ret; ret = applesmc_init_smcreg(); if (ret) return ret; applesmc_device_init(); return 0; } /* Synchronize device with memorized backlight state */ static int applesmc_pm_resume(struct device *dev) { if (smcreg.has_key_backlight) applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); return 0; } /* Reinitialize device on resume from hibernation */ static int applesmc_pm_restore(struct device *dev) { applesmc_device_init(); return applesmc_pm_resume(dev); } static const struct dev_pm_ops applesmc_pm_ops = { .resume = applesmc_pm_resume, .restore = applesmc_pm_restore, }; static struct platform_driver applesmc_driver = { .probe = applesmc_probe, .driver = { .name = "applesmc", .owner = THIS_MODULE, .pm = &applesmc_pm_ops, }, }; /* * applesmc_calibrate - Set our "resting" values. Callers must * hold applesmc_lock. 
*/ static void applesmc_calibrate(void) { applesmc_read_motion_sensor(SENSOR_X, &rest_x); applesmc_read_motion_sensor(SENSOR_Y, &rest_y); rest_x = -rest_x; } static void applesmc_idev_poll(struct input_polled_dev *dev) { struct input_dev *idev = dev->input; s16 x, y; if (applesmc_read_motion_sensor(SENSOR_X, &x)) return; if (applesmc_read_motion_sensor(SENSOR_Y, &y)) return; x = -x; input_report_abs(idev, ABS_X, x - rest_x); input_report_abs(idev, ABS_Y, y - rest_y); input_sync(idev); } /* Sysfs Files */ static ssize_t applesmc_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "applesmc\n"); } static ssize_t applesmc_position_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; s16 x, y, z; ret = applesmc_read_motion_sensor(SENSOR_X, &x); if (ret) goto out; ret = applesmc_read_motion_sensor(SENSOR_Y, &y); if (ret) goto out; ret = applesmc_read_motion_sensor(SENSOR_Z, &z); if (ret) goto out; out: if (ret) return ret; else return snprintf(buf, PAGE_SIZE, "(%d,%d,%d)\n", x, y, z); } static ssize_t applesmc_light_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; static int data_length; int ret; u8 left = 0, right = 0; u8 buffer[10]; if (!data_length) { entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY); if (IS_ERR(entry)) return PTR_ERR(entry); if (entry->len > 10) return -ENXIO; data_length = entry->len; pr_info("light sensor data length set to %d\n", data_length); } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; if (ret) goto out; ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right); } /* Displays sensor key as 
label */ static ssize_t applesmc_show_sensor_label(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) { int index = smcreg.temp_begin + to_index(devattr); const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key); } /* Displays degree Celsius * 1000 */ static ssize_t applesmc_show_temperature(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) { int index = smcreg.temp_begin + to_index(devattr); const struct applesmc_entry *entry; int ret; u8 buffer[2]; unsigned int temp; entry = applesmc_get_entry_by_index(index); if (IS_ERR(entry)) return PTR_ERR(entry); if (entry->len > 2) return -EINVAL; ret = applesmc_read_entry(entry, buffer, entry->len); if (ret) return ret; if (entry->len == 2) { temp = buffer[0] * 1000; temp += (buffer[1] >> 6) * 250; } else { temp = buffer[0] * 4000; } return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp); } static ssize_t applesmc_show_fan_speed(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; unsigned int speed = 0; char newkey[5]; u8 buffer[2]; sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); speed = ((buffer[0] << 8 | buffer[1]) >> 2); if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { int ret; unsigned long speed; char newkey[5]; u8 buffer[2]; if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000) return -EINVAL; /* Bigger than a 14-bit value */ sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); buffer[0] = (speed >> 6) & 0xff; buffer[1] = (speed << 2) & 0xff; ret = applesmc_write_key(newkey, buffer, 2); if (ret) return ret; else return count; } static ssize_t applesmc_show_fan_manual(struct device *dev, struct 
device_attribute *attr, char *sysfsbuf) { int ret; u16 manual = 0; u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { int ret; u8 buffer[2]; unsigned long input; u16 val; if (kstrtoul(sysfsbuf, 10, &input) < 0) return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; if (input) val = val | (0x01 << to_index(attr)); else val = val & ~(0x01 << to_index(attr)); buffer[0] = (val >> 8) & 0xFF; buffer[1] = val & 0xFF; ret = applesmc_write_key(FANS_MANUAL, buffer, 2); out: if (ret) return ret; else return count; } static ssize_t applesmc_show_fan_position(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; char newkey[5]; u8 buffer[17]; sprintf(newkey, FAN_ID_FMT, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 16); buffer[16] = 0; if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", buffer+4); } static ssize_t applesmc_calibrate_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", rest_x, rest_y); } static ssize_t applesmc_calibrate_store(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { applesmc_calibrate(); return count; } static void applesmc_backlight_set(struct work_struct *work) { applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); } static DECLARE_WORK(backlight_work, &applesmc_backlight_set); static void applesmc_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { int ret; backlight_state[0] = value; ret = queue_work(applesmc_led_wq, &backlight_work); if (debug && (!ret)) printk(KERN_DEBUG "applesmc: work was already on the 
queue.\n"); } static ssize_t applesmc_key_count_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; u8 buffer[4]; u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + ((u32)buffer[2]<<8) + buffer[3]; if (ret) return ret; else return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; int ret; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); ret = applesmc_read_entry(entry, sysfsbuf, entry->len); if (ret) return ret; return entry->len; } static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", entry->len); } static ssize_t applesmc_key_at_index_type_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->type); } static ssize_t applesmc_key_at_index_name_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key); } static ssize_t applesmc_key_at_index_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", key_at_index); } static ssize_t applesmc_key_at_index_store(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { unsigned long newkey; if (kstrtoul(sysfsbuf, 
10, &newkey) < 0 || newkey >= smcreg.key_count) return -EINVAL; key_at_index = newkey; return count; } static struct led_classdev applesmc_backlight = { .name = "smc::kbd_backlight", .default_trigger = "nand-disk", .brightness_set = applesmc_brightness_set, }; static struct applesmc_node_group info_group[] = { { "name", applesmc_name_show }, { "key_count", applesmc_key_count_show }, { "key_at_index", applesmc_key_at_index_show, applesmc_key_at_index_store }, { "key_at_index_name", applesmc_key_at_index_name_show }, { "key_at_index_type", applesmc_key_at_index_type_show }, { "key_at_index_data_length", applesmc_key_at_index_data_length_show }, { "key_at_index_data", applesmc_key_at_index_read_show }, { } }; static struct applesmc_node_group accelerometer_group[] = { { "position", applesmc_position_show }, { "calibrate", applesmc_calibrate_show, applesmc_calibrate_store }, { } }; static struct applesmc_node_group light_sensor_group[] = { { "light", applesmc_light_show }, { } }; static struct applesmc_node_group fan_group[] = { { "fan%d_label", applesmc_show_fan_position }, { "fan%d_input", applesmc_show_fan_speed, NULL, 0 }, { "fan%d_min", applesmc_show_fan_speed, applesmc_store_fan_speed, 1 }, { "fan%d_max", applesmc_show_fan_speed, NULL, 2 }, { "fan%d_safe", applesmc_show_fan_speed, NULL, 3 }, { "fan%d_output", applesmc_show_fan_speed, applesmc_store_fan_speed, 4 }, { "fan%d_manual", applesmc_show_fan_manual, applesmc_store_fan_manual }, { } }; static struct applesmc_node_group temp_group[] = { { "temp%d_label", applesmc_show_sensor_label }, { "temp%d_input", applesmc_show_temperature }, { } }; /* Module stuff */ /* * applesmc_destroy_nodes - remove files and free associated memory */ static void applesmc_destroy_nodes(struct applesmc_node_group *groups) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; for (grp = groups; grp->nodes; grp++) { for (node = grp->nodes; node->sda.dev_attr.attr.name; node++) sysfs_remove_file(&pdev->dev.kobj, 
&node->sda.dev_attr.attr); kfree(grp->nodes); grp->nodes = NULL; } } /* * applesmc_create_nodes - create a two-dimensional group of sysfs files */ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; struct attribute *attr; int ret, i; for (grp = groups; grp->format; grp++) { grp->nodes = kcalloc(num + 1, sizeof(*node), GFP_KERNEL); if (!grp->nodes) { ret = -ENOMEM; goto out; } for (i = 0; i < num; i++) { node = &grp->nodes[i]; sprintf(node->name, grp->format, i + 1); node->sda.index = (grp->option << 16) | (i & 0xffff); node->sda.dev_attr.show = grp->show; node->sda.dev_attr.store = grp->store; attr = &node->sda.dev_attr.attr; sysfs_attr_init(attr); attr->name = node->name; attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0); ret = sysfs_create_file(&pdev->dev.kobj, attr); if (ret) { attr->name = NULL; goto out; } } } return 0; out: applesmc_destroy_nodes(groups); return ret; } /* Create accelerometer ressources */ static int applesmc_create_accelerometer(void) { struct input_dev *idev; int ret; if (!smcreg.has_accelerometer) return 0; ret = applesmc_create_nodes(accelerometer_group, 1); if (ret) goto out; applesmc_idev = input_allocate_polled_device(); if (!applesmc_idev) { ret = -ENOMEM; goto out_sysfs; } applesmc_idev->poll = applesmc_idev_poll; applesmc_idev->poll_interval = APPLESMC_POLL_INTERVAL; /* initial calibrate for the input device */ applesmc_calibrate(); /* initialize the input device */ idev = applesmc_idev->input; idev->name = "applesmc"; idev->id.bustype = BUS_HOST; idev->dev.parent = &pdev->dev; idev->evbit[0] = BIT_MASK(EV_ABS); input_set_abs_params(idev, ABS_X, -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); input_set_abs_params(idev, ABS_Y, -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); ret = input_register_polled_device(applesmc_idev); if (ret) goto out_idev; return 0; out_idev: input_free_polled_device(applesmc_idev); out_sysfs: 
applesmc_destroy_nodes(accelerometer_group); out: pr_warn("driver init failed (ret=%d)!\n", ret); return ret; } /* Release all ressources used by the accelerometer */ static void applesmc_release_accelerometer(void) { if (!smcreg.has_accelerometer) return; input_unregister_polled_device(applesmc_idev); input_free_polled_device(applesmc_idev); applesmc_destroy_nodes(accelerometer_group); } static int applesmc_create_light_sensor(void) { if (!smcreg.num_light_sensors) return 0; return applesmc_create_nodes(light_sensor_group, 1); } static void applesmc_release_light_sensor(void) { if (!smcreg.num_light_sensors) return; applesmc_destroy_nodes(light_sensor_group); } static int applesmc_create_key_backlight(void) { if (!smcreg.has_key_backlight) return 0; applesmc_led_wq = create_singlethread_workqueue("applesmc-led"); if (!applesmc_led_wq) return -ENOMEM; return led_classdev_register(&pdev->dev, &applesmc_backlight); } static void applesmc_release_key_backlight(void) { if (!smcreg.has_key_backlight) return; led_classdev_unregister(&applesmc_backlight); destroy_workqueue(applesmc_led_wq); } static int applesmc_dmi_match(const struct dmi_system_id *id) { return 1; } /* * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". 
*/ static __initdata struct dmi_system_id applesmc_whitelist[] = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, }, { applesmc_dmi_match, "Apple MacBook Pro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro") }, }, { applesmc_dmi_match, "Apple MacBook", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, }, { applesmc_dmi_match, "Apple Macmini", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") }, }, { applesmc_dmi_match, "Apple MacPro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, }, { applesmc_dmi_match, "Apple iMac", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "iMac") }, }, { .ident = NULL } }; static int __init applesmc_init(void) { int ret; if (!dmi_check_system(applesmc_whitelist)) { pr_warn("supported laptop not found!\n"); ret = -ENODEV; goto out; } if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, "applesmc")) { ret = -ENXIO; goto out; } ret = platform_driver_register(&applesmc_driver); if (ret) goto out_region; pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT, NULL, 0); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_driver; } /* create register cache */ ret = applesmc_init_smcreg(); if (ret) goto out_device; ret = applesmc_create_nodes(info_group, 1); if (ret) goto out_smcreg; ret = applesmc_create_nodes(fan_group, smcreg.fan_count); if (ret) goto out_info; ret = applesmc_create_nodes(temp_group, smcreg.temp_count); if (ret) goto out_fans; ret = applesmc_create_accelerometer(); if (ret) goto out_temperature; ret = applesmc_create_light_sensor(); if (ret) goto out_accelerometer; ret = applesmc_create_key_backlight(); if (ret) goto out_light_sysfs; hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon_dev)) { ret = PTR_ERR(hwmon_dev); goto out_light_ledclass; } return 0; 
out_light_ledclass: applesmc_release_key_backlight(); out_light_sysfs: applesmc_release_light_sensor(); out_accelerometer: applesmc_release_accelerometer(); out_temperature: applesmc_destroy_nodes(temp_group); out_fans: applesmc_destroy_nodes(fan_group); out_info: applesmc_destroy_nodes(info_group); out_smcreg: applesmc_destroy_smcreg(); out_device: platform_device_unregister(pdev); out_driver: platform_driver_unregister(&applesmc_driver); out_region: release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); out: pr_warn("driver init failed (ret=%d)!\n", ret); return ret; } static void __exit applesmc_exit(void) { hwmon_device_unregister(hwmon_dev); applesmc_release_key_backlight(); applesmc_release_light_sensor(); applesmc_release_accelerometer(); applesmc_destroy_nodes(temp_group); applesmc_destroy_nodes(fan_group); applesmc_destroy_nodes(info_group); applesmc_destroy_smcreg(); platform_device_unregister(pdev); platform_driver_unregister(&applesmc_driver); release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); } module_init(applesmc_init); module_exit(applesmc_exit); MODULE_AUTHOR("Nicolas Boichat"); MODULE_DESCRIPTION("Apple SMC"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(dmi, applesmc_whitelist);
gpl-2.0
myjang0507/Polaris-a8elte
drivers/mfd/rtsx_pcr.c
819
31025
/* Driver for Realtek PCI-Express card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * Wei WANG <wei_wang@realsil.com.cn> * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/highmem.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/idr.h> #include <linux/platform_device.h> #include <linux/mfd/core.h> #include <linux/mfd/rtsx_pci.h> #include <asm/unaligned.h> #include "rtsx_pcr.h" static bool msi_en = true; module_param(msi_en, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msi_en, "Enable MSI"); static DEFINE_IDR(rtsx_pci_idr); static DEFINE_SPINLOCK(rtsx_pci_lock); static struct mfd_cell rtsx_pcr_cells[] = { [RTSX_SD_CARD] = { .name = DRV_NAME_RTSX_PCI_SDMMC, }, [RTSX_MS_CARD] = { .name = DRV_NAME_RTSX_PCI_MS, }, }; static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = { { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { 0, } }; MODULE_DEVICE_TABLE(pci, rtsx_pci_ids); void rtsx_pci_start_run(struct 
rtsx_pcr *pcr) { /* If pci device removed, don't queue idle work any more */ if (pcr->remove_pci) return; if (pcr->state != PDEV_STAT_RUN) { pcr->state = PDEV_STAT_RUN; if (pcr->ops->enable_auto_blink) pcr->ops->enable_auto_blink(pcr); } mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200)); } EXPORT_SYMBOL_GPL(rtsx_pci_start_run); int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data) { int i; u32 val = HAIMR_WRITE_START; val |= (u32)(addr & 0x3FFF) << 16; val |= (u32)mask << 8; val |= (u32)data; rtsx_pci_writel(pcr, RTSX_HAIMR, val); for (i = 0; i < MAX_RW_REG_CNT; i++) { val = rtsx_pci_readl(pcr, RTSX_HAIMR); if ((val & HAIMR_TRANS_END) == 0) { if (data != (u8)val) return -EIO; return 0; } } return -ETIMEDOUT; } EXPORT_SYMBOL_GPL(rtsx_pci_write_register); int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data) { u32 val = HAIMR_READ_START; int i; val |= (u32)(addr & 0x3FFF) << 16; rtsx_pci_writel(pcr, RTSX_HAIMR, val); for (i = 0; i < MAX_RW_REG_CNT; i++) { val = rtsx_pci_readl(pcr, RTSX_HAIMR); if ((val & HAIMR_TRANS_END) == 0) break; } if (i >= MAX_RW_REG_CNT) return -ETIMEDOUT; if (data) *data = (u8)(val & 0xFF); return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_read_register); int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val) { int err, i, finished = 0; u8 tmp; rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8)); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81); err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; for (i = 0; i < 100000; i++) { err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); if (err < 0) return err; if (!(tmp & 0x80)) { finished = 1; break; } } if (!finished) return -ETIMEDOUT; return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register); int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 
*val) { int err, i, finished = 0; u16 data; u8 *ptr, tmp; rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80); err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; for (i = 0; i < 100000; i++) { err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); if (err < 0) return err; if (!(tmp & 0x80)) { finished = 1; break; } } if (!finished) return -ETIMEDOUT; rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0); rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0); err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; ptr = rtsx_pci_get_cmd_data(pcr); data = ((u16)ptr[1] << 8) | ptr[0]; if (val) *val = data; return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register); void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr) { rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80); rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80); } EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd); void rtsx_pci_add_cmd(struct rtsx_pcr *pcr, u8 cmd_type, u16 reg_addr, u8 mask, u8 data) { unsigned long flags; u32 val = 0; u32 *ptr = (u32 *)(pcr->host_cmds_ptr); val |= (u32)(cmd_type & 0x03) << 30; val |= (u32)(reg_addr & 0x3FFF) << 16; val |= (u32)mask << 8; val |= (u32)data; spin_lock_irqsave(&pcr->lock, flags); ptr += pcr->ci; if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) { put_unaligned_le32(val, ptr); ptr++; pcr->ci++; } spin_unlock_irqrestore(&pcr->lock, flags); } EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd); void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr) { u32 val = 1 << 31; rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); val |= (u32)(pcr->ci * 4) & 0x00FFFFFF; /* Hardware Auto Response */ val |= 0x40000000; rtsx_pci_writel(pcr, RTSX_HCBCTLR, val); } EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait); int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout) { struct completion trans_done; u32 val = 1 << 31; long 
timeleft; unsigned long flags; int err = 0; spin_lock_irqsave(&pcr->lock, flags); /* set up data structures for the wakeup system */ pcr->done = &trans_done; pcr->trans_result = TRANS_NOT_READY; init_completion(&trans_done); rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); val |= (u32)(pcr->ci * 4) & 0x00FFFFFF; /* Hardware Auto Response */ val |= 0x40000000; rtsx_pci_writel(pcr, RTSX_HCBCTLR, val); spin_unlock_irqrestore(&pcr->lock, flags); /* Wait for TRANS_OK_INT */ timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n", __func__, __LINE__); err = -ETIMEDOUT; goto finish_send_cmd; } spin_lock_irqsave(&pcr->lock, flags); if (pcr->trans_result == TRANS_RESULT_FAIL) err = -EINVAL; else if (pcr->trans_result == TRANS_RESULT_OK) err = 0; else if (pcr->trans_result == TRANS_NO_DEVICE) err = -ENODEV; spin_unlock_irqrestore(&pcr->lock, flags); finish_send_cmd: spin_lock_irqsave(&pcr->lock, flags); pcr->done = NULL; spin_unlock_irqrestore(&pcr->lock, flags); if ((err < 0) && (err != -ENODEV)) rtsx_pci_stop_cmd(pcr); if (pcr->finish_me) complete(pcr->finish_me); return err; } EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd); static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, dma_addr_t addr, unsigned int len, int end) { u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi; u64 val; u8 option = SG_VALID | SG_TRANS_DATA; dev_dbg(&(pcr->pci->dev), "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len); if (end) option |= SG_END; val = ((u64)addr << 32) | ((u64)len << 12) | option; put_unaligned_le64(val, ptr); pcr->sgi++; } int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, int num_sg, bool read, int timeout) { struct completion trans_done; u8 dir; int err = 0, i, count; long timeleft; unsigned long flags; struct scatterlist *sg; enum dma_data_direction dma_dir; u32 val; dma_addr_t addr; unsigned int len; dev_dbg(&(pcr->pci->dev), "--> %s: num_sg 
= %d\n", __func__, num_sg); /* don't transfer data during abort processing */ if (pcr->remove_pci) return -EINVAL; if ((sglist == NULL) || (num_sg <= 0)) return -EINVAL; if (read) { dir = DEVICE_TO_HOST; dma_dir = DMA_FROM_DEVICE; } else { dir = HOST_TO_DEVICE; dma_dir = DMA_TO_DEVICE; } count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); if (count < 1) { dev_err(&(pcr->pci->dev), "scatterlist map failed\n"); return -EINVAL; } dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; pcr->sgi = 0; for_each_sg(sglist, sg, count, i) { addr = sg_dma_address(sg); len = sg_dma_len(sg); rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1); } spin_lock_irqsave(&pcr->lock, flags); pcr->done = &trans_done; pcr->trans_result = TRANS_NOT_READY; init_completion(&trans_done); rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); spin_unlock_irqrestore(&pcr->lock, flags); timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n", __func__, __LINE__); err = -ETIMEDOUT; goto out; } spin_lock_irqsave(&pcr->lock, flags); if (pcr->trans_result == TRANS_RESULT_FAIL) err = -EINVAL; else if (pcr->trans_result == TRANS_NO_DEVICE) err = -ENODEV; spin_unlock_irqrestore(&pcr->lock, flags); out: spin_lock_irqsave(&pcr->lock, flags); pcr->done = NULL; spin_unlock_irqrestore(&pcr->lock, flags); dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); if ((err < 0) && (err != -ENODEV)) rtsx_pci_stop_cmd(pcr); if (pcr->finish_me) complete(pcr->finish_me); return err; } EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) { int err; int i, j; u16 reg; u8 *ptr; if (buf_len > 512) buf_len = 512; ptr = buf; reg = PPBUF_BASE2; for (i = 0; i < buf_len / 256; i++) { rtsx_pci_init_cmd(pcr); for (j = 0; j < 256; j++) 
rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0); err = rtsx_pci_send_cmd(pcr, 250); if (err < 0) return err; memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256); ptr += 256; } if (buf_len % 256) { rtsx_pci_init_cmd(pcr); for (j = 0; j < buf_len % 256; j++) rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0); err = rtsx_pci_send_cmd(pcr, 250); if (err < 0) return err; } memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256); return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf); int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) { int err; int i, j; u16 reg; u8 *ptr; if (buf_len > 512) buf_len = 512; ptr = buf; reg = PPBUF_BASE2; for (i = 0; i < buf_len / 256; i++) { rtsx_pci_init_cmd(pcr); for (j = 0; j < 256; j++) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg++, 0xFF, *ptr); ptr++; } err = rtsx_pci_send_cmd(pcr, 250); if (err < 0) return err; } if (buf_len % 256) { rtsx_pci_init_cmd(pcr); for (j = 0; j < buf_len % 256; j++) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg++, 0xFF, *ptr); ptr++; } err = rtsx_pci_send_cmd(pcr, 250); if (err < 0) return err; } return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf); static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl) { int err; rtsx_pci_init_cmd(pcr); while (*tbl & 0xFFFF0000) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, (u16)(*tbl >> 16), 0xFF, (u8)(*tbl)); tbl++; } err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; return 0; } int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card) { const u32 *tbl; if (card == RTSX_SD_CARD) tbl = pcr->sd_pull_ctl_enable_tbl; else if (card == RTSX_MS_CARD) tbl = pcr->ms_pull_ctl_enable_tbl; else return -EINVAL; return rtsx_pci_set_pull_ctl(pcr, tbl); } EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable); int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card) { const u32 *tbl; if (card == RTSX_SD_CARD) tbl = pcr->sd_pull_ctl_disable_tbl; else if (card == RTSX_MS_CARD) tbl = pcr->ms_pull_ctl_disable_tbl; else return -EINVAL; return 
rtsx_pci_set_pull_ctl(pcr, tbl); } EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable); static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr) { pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN; if (pcr->num_slots > 1) pcr->bier |= MS_INT_EN; /* Enable Bus Interrupt */ rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier); dev_dbg(&(pcr->pci->dev), "RTSX_BIER: 0x%08x\n", pcr->bier); } static inline u8 double_ssc_depth(u8 depth) { return ((depth > 1) ? (depth - 1) : depth); } static u8 revise_ssc_depth(u8 ssc_depth, u8 div) { if (div > CLK_DIV_1) { if (ssc_depth > (div - 1)) ssc_depth -= (div - 1); else ssc_depth = SSC_DEPTH_4M; } return ssc_depth; } int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) { int err, clk; u8 n, clk_divider, mcu_cnt, div; u8 depth[] = { [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M, [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M, [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M, [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K, [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K, }; if (initial_mode) { /* We use 250k(around) here, in initial stage */ clk_divider = SD_CLK_DIVIDE_128; card_clock = 30000000; } else { clk_divider = SD_CLK_DIVIDE_0; } err = rtsx_pci_write_register(pcr, SD_CFG1, SD_CLK_DIVIDE_MASK, clk_divider); if (err < 0) return err; card_clock /= 1000000; dev_dbg(&(pcr->pci->dev), "Switch card clock to %dMHz\n", card_clock); clk = card_clock; if (!initial_mode && double_clk) clk = card_clock * 2; dev_dbg(&(pcr->pci->dev), "Internal SSC clock: %dMHz (cur_clock = %d)\n", clk, pcr->cur_clock); if (clk == pcr->cur_clock) return 0; if (pcr->ops->conv_clk_and_div_n) n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); else n = (u8)(clk - 2); if ((clk <= 2) || (n > MAX_DIV_N_PCR)) return -EINVAL; mcu_cnt = (u8)(125/clk + 3); if (mcu_cnt > 15) mcu_cnt = 15; /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */ div = CLK_DIV_1; while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) { if 
(pcr->ops->conv_clk_and_div_n) { int dbl_clk = pcr->ops->conv_clk_and_div_n(n, DIV_N_TO_CLK) * 2; n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk, CLK_TO_DIV_N); } else { n = (n + 2) * 2 - 2; } div++; } dev_dbg(&(pcr->pci->dev), "n = %d, div = %d\n", n, div); ssc_depth = depth[ssc_depth]; if (double_clk) ssc_depth = double_ssc_depth(ssc_depth); ssc_depth = revise_ssc_depth(ssc_depth, div); dev_dbg(&(pcr->pci->dev), "ssc_depth = %d\n", ssc_depth); rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0xFF, (div << 4) | mcu_cnt); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, SSC_DEPTH_MASK, ssc_depth); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); if (vpclk) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); } err = rtsx_pci_send_cmd(pcr, 2000); if (err < 0) return err; /* Wait SSC clock stable */ udelay(10); err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); if (err < 0) return err; pcr->cur_clock = clk; return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock); int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card) { if (pcr->ops->card_power_on) return pcr->ops->card_power_on(pcr, card); return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on); int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card) { if (pcr->ops->card_power_off) return pcr->ops->card_power_off(pcr, card); return 0; } EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card) { unsigned int cd_mask[] = { [RTSX_SD_CARD] = SD_EXIST, [RTSX_MS_CARD] = MS_EXIST }; if (!pcr->ms_pmos) { /* When using single PMOS, accessing card is not permitted * if the existing card is not the designated one. 
*/
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);

/* Switch card bus signalling voltage via the chip-specific hook;
 * chips that provide no hook report success without touching anything. */
int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
	if (pcr->ops->switch_output_voltage)
		return pcr->ops->switch_output_voltage(pcr, voltage);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);

/* Read the raw card-present bits from RTSX_BIPR, optionally replaced by
 * the chip's card-detect deglitch hook when one is provided. */
unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

/* Abort any in-flight command/DMA transfer: wake the waiter blocked on
 * pcr->done, stop the command engine (unless the PCI device is already
 * being removed), then give the waiter a short window to signal back
 * through pcr->finish_me before clearing it. */
void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	/* 2 ms grace period for the aborted transfer path to wind down */
	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);

/* Delayed work scheduled by the ISR: consume the accumulated card
 * insert/remove flags, update pcr->card_exist, and (after dropping the
 * locks) notify the affected slot's card_event handler. */
static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	/* Snapshot and clear the insert/remove flags under pcr->lock so the
	 * ISR can keep accumulating new events while we process these. */
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	/* Only count an insertion if the card is still present now. */
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		dev_dbg(&(pcr->pci->dev),
				"card_inserted: 0x%x, card_removed: 0x%x\n",
				card_inserted, card_removed);

		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}
	mutex_unlock(&pcr->pcr_mutex);

	/* Deliver slot events outside all locks; handlers may sleep. */
	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}

/* Top-half interrupt handler: acknowledge RTSX_BIPR, record SD/MS
 * insert/remove transitions, complete a pending command/DMA transfer,
 * and defer card-detect processing to rtsx_pci_card_detect(). */
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		/* None of the interrupts we enabled fired: not ours. */
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		/* All-ones read: the device has dropped off the bus. */
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}
	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		/* Failure (or delink) takes precedence over success. */
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	/* Debounce: let card-detect work run 200 ms later. */
	if (pcr->card_inserted || pcr->card_removed)
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

/* Install the interrupt handler (MSI if enabled, otherwise a shared
 * legacy INTx line) and record the IRQ number in pcr->irq. */
static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	dev_info(&(pcr->pci->dev), "%s: pcr->msi_en = %d, pci->irq = %d\n",
			__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr, pcr->msi_en ?
			0 : IRQF_SHARED, DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	/* Disable legacy INTx when MSI is in use, enable it otherwise. */
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

/* Delayed work: mark the device idle and turn off the LED (and its
 * auto-blink) via the chip-specific hooks. */
static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	mutex_unlock(&pcr->pcr_mutex);
}

/* One-time controller bring-up: program the host command buffer address,
 * unmask bus interrupts, power up the SSC clock generator, run the
 * chip-specific PHY tweak, then issue a batch of register writes that
 * put clocking, link state and interrupts into a known default state.
 * Called from probe and again on resume. */
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	int err;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset ASPM state to default value */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
			0x07, DRIVER_TYPE_D);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
LINK_RDY_INT, LINK_RDY_INT); /* Enlarge the estimation window of PERST# glitch * to reduce the chance of invalid card interrupt */ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80); /* Update RC oscillator to 400k * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1 * 1: 2M 0: 400k */ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00); /* Set interrupt write clear * bit 1: U_elbi_if_rd_clr_en * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear */ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0); /* Force CLKREQ# PIN to drive 0 to request clock */ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08); err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; /* Enable clk_request_n to enable clock power management */ rtsx_pci_write_config_byte(pcr, 0x81, 1); /* Enter L1 when host tx idle */ rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B); if (pcr->ops->extra_init_hw) { err = pcr->ops->extra_init_hw(pcr); if (err < 0) return err; } /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
*/ if (pcr->ops->cd_deglitch) pcr->card_exist = pcr->ops->cd_deglitch(pcr); else pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST; return 0; } static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) { int err; spin_lock_init(&pcr->lock); mutex_init(&pcr->pcr_mutex); switch (PCI_PID(pcr)) { default: case 0x5209: rts5209_init_params(pcr); break; case 0x5229: rts5229_init_params(pcr); break; case 0x5289: rtl8411_init_params(pcr); break; case 0x5227: rts5227_init_params(pcr); break; case 0x5249: rts5249_init_params(pcr); break; } dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n", PCI_PID(pcr), pcr->ic_version); pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot), GFP_KERNEL); if (!pcr->slots) return -ENOMEM; pcr->state = PDEV_STAT_IDLE; err = rtsx_pci_init_hw(pcr); if (err < 0) { kfree(pcr->slots); return err; } return 0; } static int rtsx_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct rtsx_pcr *pcr; struct pcr_handle *handle; u32 base, len; int ret, i; dev_dbg(&(pcidev->dev), ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n", pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, (int)pcidev->revision); ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret < 0) return ret; ret = pci_enable_device(pcidev); if (ret) return ret; ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI); if (ret) goto disable; pcr = kzalloc(sizeof(*pcr), GFP_KERNEL); if (!pcr) { ret = -ENOMEM; goto release_pci; } handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) { ret = -ENOMEM; goto free_pcr; } handle->pcr = pcr; idr_preload(GFP_KERNEL); spin_lock(&rtsx_pci_lock); ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT); if (ret >= 0) pcr->id = ret; spin_unlock(&rtsx_pci_lock); idr_preload_end(); if (ret < 0) goto free_handle; pcr->pci = pcidev; dev_set_drvdata(&pcidev->dev, handle); len = pci_resource_len(pcidev, 0); base = pci_resource_start(pcidev, 0); pcr->remap_addr = ioremap_nocache(base, len); if 
(!pcr->remap_addr) { ret = -ENOMEM; goto free_host; } pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev), RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr), GFP_KERNEL); if (pcr->rtsx_resv_buf == NULL) { ret = -ENXIO; goto unmap; } pcr->host_cmds_ptr = pcr->rtsx_resv_buf; pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr; pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN; pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN; pcr->card_inserted = 0; pcr->card_removed = 0; INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect); INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work); pcr->msi_en = msi_en; if (pcr->msi_en) { ret = pci_enable_msi(pcidev); if (ret) pcr->msi_en = false; } ret = rtsx_pci_acquire_irq(pcr); if (ret < 0) goto disable_msi; pci_set_master(pcidev); synchronize_irq(pcr->irq); ret = rtsx_pci_init_chip(pcr); if (ret < 0) goto disable_irq; for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) { rtsx_pcr_cells[i].platform_data = handle; rtsx_pcr_cells[i].pdata_size = sizeof(*handle); } ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); if (ret < 0) goto disable_irq; schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); return 0; disable_irq: free_irq(pcr->irq, (void *)pcr); disable_msi: if (pcr->msi_en) pci_disable_msi(pcr->pci); dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN, pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr); unmap: iounmap(pcr->remap_addr); free_host: dev_set_drvdata(&pcidev->dev, NULL); free_handle: kfree(handle); free_pcr: kfree(pcr); release_pci: pci_release_regions(pcidev); disable: pci_disable_device(pcidev); return ret; } static void rtsx_pci_remove(struct pci_dev *pcidev) { struct pcr_handle *handle = pci_get_drvdata(pcidev); struct rtsx_pcr *pcr = handle->pcr; pcr->remove_pci = true; /* Disable interrupts at the pcr level */ spin_lock_irq(&pcr->lock); rtsx_pci_writel(pcr, RTSX_BIER, 0); pcr->bier = 0; spin_unlock_irq(&pcr->lock); 
	/* Stop deferred work before tearing resources down. */
	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	dev_set_drvdata(&pcidev->dev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

#ifdef CONFIG_PM

/* Legacy PCI PM suspend: mask bus interrupts, park the controller, then
 * save PCI state and drop to the target power state. */
static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	/* NOTE(review): non-sync cancel — a work item already running could
	 * still touch the device here; confirm this is acceptable. */
	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	/* Mask all bus interrupts. */
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x02);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

/* Legacy PCI PM resume: restore PCI state, wake the controller, and
 * re-run the full hardware init sequence. */
static int rtsx_pci_resume(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		goto out;
	pci_set_master(pcidev);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret =
rtsx_pci_init_hw(pcr); if (ret) goto out; schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); out: mutex_unlock(&pcr->pcr_mutex); return ret; } #else /* CONFIG_PM */ #define rtsx_pci_suspend NULL #define rtsx_pci_resume NULL #endif /* CONFIG_PM */ static struct pci_driver rtsx_pci_driver = { .name = DRV_NAME_RTSX_PCI, .id_table = rtsx_pci_ids, .probe = rtsx_pci_probe, .remove = rtsx_pci_remove, .suspend = rtsx_pci_suspend, .resume = rtsx_pci_resume, }; module_pci_driver(rtsx_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>"); MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
gpl-2.0
shakalaca/ASUS_ZenFone_ZC451CG
linux/kernel/drivers/usb/gadget/fusb300_udc.c
2099
36752
/* * Fusb300 UDC (USB gadget) * * Copyright (C) 2010 Faraday Technology Corp. * * Author : Yuan-hsin Chen <yhchen@faraday-tech.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. */ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include "fusb300_udc.h" MODULE_DESCRIPTION("FUSB300 USB gadget driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>"); MODULE_ALIAS("platform:fusb300_udc"); #define DRIVER_VERSION "20 October 2010" static const char udc_name[] = "fusb300_udc"; static const char * const fusb300_ep_name[] = { "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9", "ep10", "ep11", "ep12", "ep13", "ep14", "ep15" }; static void done(struct fusb300_ep *ep, struct fusb300_request *req, int status); static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset, u32 value) { u32 reg = ioread32(fusb300->reg + offset); reg |= value; iowrite32(reg, fusb300->reg + offset); } static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset, u32 value) { u32 reg = ioread32(fusb300->reg + offset); reg &= ~value; iowrite32(reg, fusb300->reg + offset); } static void fusb300_ep_setting(struct fusb300_ep *ep, struct fusb300_ep_info info) { ep->epnum = info.epnum; ep->type = info.type; } static int fusb300_ep_release(struct fusb300_ep *ep) { if (!ep->epnum) return 0; ep->epnum = 0; ep->stall = 0; ep->wedged = 0; return 0; } static void fusb300_set_fifo_entry(struct fusb300 *fusb300, u32 ep) { u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); val &= ~FUSB300_EPSET1_FIFOENTRY_MSK; val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM); iowrite32(val, fusb300->reg + 
FUSB300_OFFSET_EPSET1(ep)); } static void fusb300_set_start_entry(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM; reg &= ~FUSB300_EPSET1_START_ENTRY_MSK ; reg |= FUSB300_EPSET1_START_ENTRY(start_entry); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) { fusb300->fifo_entry_num = 0; fusb300->addrofs = 0; pr_err("fifo entry is over the maximum number!\n"); } else fusb300->fifo_entry_num++; } /* set fusb300_set_start_entry first before fusb300_set_epaddrofs */ static void fusb300_set_epaddrofs(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); reg &= ~FUSB300_EPSET2_ADDROFS_MSK; reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM; } static void ep_fifo_setting(struct fusb300 *fusb300, struct fusb300_ep_info info) { fusb300_set_fifo_entry(fusb300, info.epnum); fusb300_set_start_entry(fusb300, info.epnum); fusb300_set_epaddrofs(fusb300, info); } static void fusb300_set_eptype(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_TYPE_MSK; reg |= FUSB300_EPSET1_TYPE(info.type); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_epdir(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg; if (!info.dir_in) return; reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_DIR_MSK; reg |= FUSB300_EPSET1_DIRIN; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_ep_active(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); reg |= 
FUSB300_EPSET1_ACTEN; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); } static void fusb300_set_epmps(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); reg &= ~FUSB300_EPSET2_MPS_MSK; reg |= FUSB300_EPSET2_MPS(info.maxpacket); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); } static void fusb300_set_interval(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_INTERVAL(0x7); reg |= FUSB300_EPSET1_INTERVAL(info.interval); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_bwnum(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_BWNUM(0x3); reg |= FUSB300_EPSET1_BWNUM(info.bw_num); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void set_ep_reg(struct fusb300 *fusb300, struct fusb300_ep_info info) { fusb300_set_eptype(fusb300, info); fusb300_set_epdir(fusb300, info); fusb300_set_epmps(fusb300, info); if (info.interval) fusb300_set_interval(fusb300, info); if (info.bw_num) fusb300_set_bwnum(fusb300, info); fusb300_set_ep_active(fusb300, info.epnum); } static int config_ep(struct fusb300_ep *ep, const struct usb_endpoint_descriptor *desc) { struct fusb300 *fusb300 = ep->fusb300; struct fusb300_ep_info info; ep->ep.desc = desc; info.interval = 0; info.addrofs = 0; info.bw_num = 0; info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 
1 : 0; info.maxpacket = usb_endpoint_maxp(desc); info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; if ((info.type == USB_ENDPOINT_XFER_INT) || (info.type == USB_ENDPOINT_XFER_ISOC)) { info.interval = desc->bInterval; if (info.type == USB_ENDPOINT_XFER_ISOC) info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11); } ep_fifo_setting(fusb300, info); set_ep_reg(fusb300, info); fusb300_ep_setting(ep, info); fusb300->ep[info.epnum] = ep; return 0; } static int fusb300_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct fusb300_ep *ep; ep = container_of(_ep, struct fusb300_ep, ep); if (ep->fusb300->reenum) { ep->fusb300->fifo_entry_num = 0; ep->fusb300->addrofs = 0; ep->fusb300->reenum = 0; } return config_ep(ep, desc); } static int fusb300_disable(struct usb_ep *_ep) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; ep = container_of(_ep, struct fusb300_ep, ep); BUG_ON(!ep); while (!list_empty(&ep->queue)) { req = list_entry(ep->queue.next, struct fusb300_request, queue); spin_lock_irqsave(&ep->fusb300->lock, flags); done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->fusb300->lock, flags); } return fusb300_ep_release(ep); } static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct fusb300_request *req; req = kzalloc(sizeof(struct fusb300_request), gfp_flags); if (!req) return NULL; INIT_LIST_HEAD(&req->queue); return &req->req; } static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct fusb300_request *req; req = container_of(_req, struct fusb300_request, req); kfree(req); } static int enable_fifo_int(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; if (ep->epnum) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); } else { pr_err("can't enable_fifo_int ep0\n"); return -EINVAL; } return 0; } static int disable_fifo_int(struct fusb300_ep *ep) { struct fusb300 *fusb300 = 
ep->fusb300;

	/* ep0 FIFO interrupt cannot be gated through IGER0. */
	if (ep->epnum) {
		fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0,
			FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
	} else {
		pr_err("can't disable_fifo_int ep0\n");
		return -EINVAL;
	}
	return 0;
}

/* Program the control-transfer byte count into the CSR LEN field. */
static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length)
{
	u32 reg;

	reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
	reg &= ~FUSB300_CSR_LEN_MSK;
	reg |= FUSB300_CSR_LEN(length);
	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR);
}

/* write data to cx fifo */
/* Pushes up to SS_CTL_MAX_PACKET_SIZE bytes of the request into the
 * control FIFO, 32 bits at a time (with a 1-3 byte tail write for
 * lengths not a multiple of 4), and advances req->req.actual. */
static void fusb300_wrcxf(struct fusb300_ep *ep,
		   struct fusb300_request *req)
{
	int i = 0;
	u8 *tmp;
	u32 data;
	struct fusb300 *fusb300 = ep->fusb300;
	u32 length = req->req.length - req->req.actual;

	tmp = req->req.buf + req->req.actual;

	if (length > SS_CTL_MAX_PACKET_SIZE) {
		/* Full packet this pass; caller sends the rest later. */
		fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE);
		for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) {
			/* Little-endian pack of 4 buffer bytes. */
			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
				*(tmp + 3) << 24;
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			tmp += 4;
		}
		req->req.actual += SS_CTL_MAX_PACKET_SIZE;
	} else { /* length is less than max packet size */
		fusb300_set_cxlen(fusb300, length);
		for (i = length >> 2; i > 0; i--) {
			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
				*(tmp + 3) << 24;
			printk(KERN_DEBUG " 0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			tmp = tmp + 4;
		}
		/* Flush the 1-3 byte remainder, zero-padded to 32 bits. */
		switch (length % 4) {
		case 1:
			data = *tmp;
			printk(KERN_DEBUG " 0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			break;
		case 2:
			data = *tmp | *(tmp + 1) << 8;
			printk(KERN_DEBUG " 0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			break;
		case 3:
			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
			printk(KERN_DEBUG " 0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			break;
		default:
			break;
		}

		req->req.actual += length;
	}
}

/* Stall endpoint "ep" by setting the STL bit in its EPSET0 register. */
static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep)
{
	fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
		FUSB300_EPSET0_STL);
}

static void
fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); if (reg & FUSB300_EPSET0_STL) { printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep); reg |= FUSB300_EPSET0_STL_CLR; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); } } static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req) { if (ep->fusb300->ep0_dir) { /* if IN */ if (req->req.length) { fusb300_wrcxf(ep, req); } else printk(KERN_DEBUG "%s : req->req.length = 0x%x\n", __func__, req->req.length); if ((req->req.length == req->req.actual) || (req->req.actual < ep->ep.maxpacket)) done(ep, req, 0); } else { /* OUT */ if (!req->req.length) done(ep, req, 0); else fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1, FUSB300_IGER1_CX_OUT_INT); } } static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; int request = 0; ep = container_of(_ep, struct fusb300_ep, ep); req = container_of(_req, struct fusb300_request, req); if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&ep->fusb300->lock, flags); if (list_empty(&ep->queue)) request = 1; list_add_tail(&req->queue, &ep->queue); req->req.actual = 0; req->req.status = -EINPROGRESS; if (ep->ep.desc == NULL) /* ep0 */ ep0_queue(ep, req); else if (request && !ep->stall) enable_fifo_int(ep); spin_unlock_irqrestore(&ep->fusb300->lock, flags); return 0; } static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; ep = container_of(_ep, struct fusb300_ep, ep); req = container_of(_req, struct fusb300_request, req); spin_lock_irqsave(&ep->fusb300->lock, flags); if (!list_empty(&ep->queue)) done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->fusb300->lock, flags); return 0; } static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge) { 
struct fusb300_ep *ep; struct fusb300 *fusb300; unsigned long flags; int ret = 0; ep = container_of(_ep, struct fusb300_ep, ep); fusb300 = ep->fusb300; spin_lock_irqsave(&ep->fusb300->lock, flags); if (!list_empty(&ep->queue)) { ret = -EAGAIN; goto out; } if (value) { fusb300_set_epnstall(fusb300, ep->epnum); ep->stall = 1; if (wedge) ep->wedged = 1; } else { fusb300_clear_epnstall(fusb300, ep->epnum); ep->stall = 0; ep->wedged = 0; } out: spin_unlock_irqrestore(&ep->fusb300->lock, flags); return ret; } static int fusb300_set_halt(struct usb_ep *_ep, int value) { return fusb300_set_halt_and_wedge(_ep, value, 0); } static int fusb300_set_wedge(struct usb_ep *_ep) { return fusb300_set_halt_and_wedge(_ep, 1, 1); } static void fusb300_fifo_flush(struct usb_ep *_ep) { } static struct usb_ep_ops fusb300_ep_ops = { .enable = fusb300_enable, .disable = fusb300_disable, .alloc_request = fusb300_alloc_request, .free_request = fusb300_free_request, .queue = fusb300_queue, .dequeue = fusb300_dequeue, .set_halt = fusb300_set_halt, .fifo_flush = fusb300_fifo_flush, .set_wedge = fusb300_set_wedge, }; /*****************************************************************************/ static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset, u32 value) { iowrite32(value, fusb300->reg + offset); } static void fusb300_reset(void) { } static void fusb300_set_cxstall(struct fusb300 *fusb300) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, FUSB300_CSR_STL); } static void fusb300_set_cxdone(struct fusb300 *fusb300) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, FUSB300_CSR_DONE); } /* read data from cx fifo */ void fusb300_rdcxf(struct fusb300 *fusb300, u8 *buffer, u32 length) { int i = 0; u8 *tmp; u32 data; tmp = buffer; for (i = (length >> 2); i > 0; i--) { data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) = (data >> 16) & 0xFF; *(tmp + 3) = (data >> 24) & 0xFF; tmp = tmp 
+ 4;
    }

    switch (length % 4) {
    case 1:
        data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
        printk(KERN_DEBUG " 0x%x\n", data);
        *tmp = data & 0xFF;
        break;
    case 2:
        data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
        printk(KERN_DEBUG " 0x%x\n", data);
        *tmp = data & 0xFF;
        *(tmp + 1) = (data >> 8) & 0xFF;
        break;
    case 3:
        data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
        printk(KERN_DEBUG " 0x%x\n", data);
        *tmp = data & 0xFF;
        *(tmp + 1) = (data >> 8) & 0xFF;
        *(tmp + 2) = (data >> 16) & 0xFF;
        break;
    default:
        break;
    }
}

/* Read @length bytes from endpoint @ep's FIFO into the current request
 * buffer and advance req->req.actual.  After draining, busy-waits until
 * the sync FIFO reports empty (IGR1 SYNF0_EMPTY) — note this is a
 * polling loop inside what may be interrupt context. */
static void fusb300_rdfifo(struct fusb300_ep *ep,
    struct fusb300_request *req,
    u32 length)
{
    int i = 0;
    u8 *tmp;
    u32 data, reg;
    struct fusb300 *fusb300 = ep->fusb300;

    tmp = req->req.buf + req->req.actual;
    req->req.actual += length;

    if (req->req.actual > req->req.length)
        printk(KERN_DEBUG "req->req.actual > req->req.length\n");

    for (i = (length >> 2); i > 0; i--) {
        data = ioread32(fusb300->reg +
            FUSB300_OFFSET_EPPORT(ep->epnum));
        *tmp = data & 0xFF;
        *(tmp + 1) = (data >> 8) & 0xFF;
        *(tmp + 2) = (data >> 16) & 0xFF;
        *(tmp + 3) = (data >> 24) & 0xFF;
        tmp = tmp + 4;
    }

    switch (length % 4) {
    case 1:
        data = ioread32(fusb300->reg +
            FUSB300_OFFSET_EPPORT(ep->epnum));
        *tmp = data & 0xFF;
        break;
    case 2:
        data = ioread32(fusb300->reg +
            FUSB300_OFFSET_EPPORT(ep->epnum));
        *tmp = data & 0xFF;
        *(tmp + 1) = (data >> 8) & 0xFF;
        break;
    case 3:
        data = ioread32(fusb300->reg +
            FUSB300_OFFSET_EPPORT(ep->epnum));
        *tmp = data & 0xFF;
        *(tmp + 1) = (data >> 8) & 0xFF;
        *(tmp + 2) = (data >> 16) & 0xFF;
        break;
    default:
        break;
    }

    /* i is reused as a "warned already" counter here */
    do {
        reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
        reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
        if (i)
            printk(KERN_INFO "sync fifo is not empty!\n");
        i++;
    } while (!reg);
}

/* Return non-zero if endpoint @ep currently has its STALL bit set. */
static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
{
    u8 value;
    u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));

    value = reg & FUSB300_EPSET0_STL;

    return value;
}

/* Return non-zero if the control endpoint is stalled (CSR STL bit). */
static u8 fusb300_get_cxstall(struct fusb300 *fusb300)
{
    u8 value;
    u32 reg = ioread32(fusb300->reg +
        FUSB300_OFFSET_CSR);

    value = (reg & FUSB300_CSR_STL) >> 1;

    return value;
}

/* Reject the current control request by stalling ep0. */
static void request_error(struct fusb300 *fusb300)
{
    fusb300_set_cxstall(fusb300);
    printk(KERN_DEBUG "request error!!\n");
}

/* Handle GET_STATUS: build the 2-byte status word and queue it on ep0.
 * Drops fusb300->lock around fusb300_queue() (which retakes it).
 * NOTE(review): w_index is read without le16_to_cpu() here, unlike
 * set_feature() below — looks like a big-endian bug; confirm. */
static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
__releases(fusb300->lock)
__acquires(fusb300->lock)
{
    u8 ep;
    u16 status = 0;
    u16 w_index = ctrl->wIndex;

    switch (ctrl->bRequestType & USB_RECIP_MASK) {
    case USB_RECIP_DEVICE:
        status = 1 << USB_DEVICE_SELF_POWERED;
        break;
    case USB_RECIP_INTERFACE:
        status = 0;
        break;
    case USB_RECIP_ENDPOINT:
        ep = w_index & USB_ENDPOINT_NUMBER_MASK;
        if (ep) {
            if (fusb300_get_epnstall(fusb300, ep))
                status = 1 << USB_ENDPOINT_HALT;
        } else {
            if (fusb300_get_cxstall(fusb300))
                status = 0;
        }
        break;

    default:
        request_error(fusb300);
        return;		/* exit */
    }

    fusb300->ep0_data = cpu_to_le16(status);
    fusb300->ep0_req->buf = &fusb300->ep0_data;
    fusb300->ep0_req->length = 2;

    spin_unlock(&fusb300->lock);
    fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL);
    spin_lock(&fusb300->lock);
}

/* Handle SET_FEATURE: device/interface cases just ack; the endpoint
 * case stalls the addressed endpoint (or CX for ep0) and then acks. */
static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
    u8 ep;

    switch (ctrl->bRequestType & USB_RECIP_MASK) {
    case USB_RECIP_DEVICE:
        fusb300_set_cxdone(fusb300);
        break;
    case USB_RECIP_INTERFACE:
        fusb300_set_cxdone(fusb300);
        break;
    case USB_RECIP_ENDPOINT: {
        u16 w_index = le16_to_cpu(ctrl->wIndex);

        ep = w_index & USB_ENDPOINT_NUMBER_MASK;
        if (ep)
            fusb300_set_epnstall(fusb300, ep);
        else
            fusb300_set_cxstall(fusb300);
        fusb300_set_cxdone(fusb300);
        }
        break;
    default:
        request_error(fusb300);
        break;
    }
}

/* Reset the data toggle / sequence number for endpoint @ep. */
static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep)
{
    fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
        FUSB300_EPSET0_CLRSEQNUM);
}

/* Handle CLEAR_FEATURE.  For ENDPOINT_HALT: a wedged endpoint stays
 * halted (per USB spec, only a wedge owner may clear it); otherwise the
 * stall and sequence number are cleared and pending transfers resumed.
 * NOTE(review): ctrl->wIndex is used without le16_to_cpu() — same
 * endianness concern as get_status(); confirm. */
static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
    struct fusb300_ep *ep =
        fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];

    switch (ctrl->bRequestType & USB_RECIP_MASK) {
    case USB_RECIP_DEVICE:
        fusb300_set_cxdone(fusb300);
        break;
    case USB_RECIP_INTERFACE:
        fusb300_set_cxdone(fusb300);
        break;
    case USB_RECIP_ENDPOINT:
        if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
            if (ep->wedged) {
                fusb300_set_cxdone(fusb300);
                break;
            }
            if (ep->stall) {
                ep->stall = 0;
                fusb300_clear_seqnum(fusb300, ep->epnum);
                fusb300_clear_epnstall(fusb300, ep->epnum);
                if (!list_empty(&ep->queue))
                    enable_fifo_int(ep);
            }
        }
        fusb300_set_cxdone(fusb300);
        break;
    default:
        request_error(fusb300);
        break;
    }
}

/* Latch the USB device address into the Device Address Register. */
static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr)
{
    u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR);

    reg &= ~FUSB300_DAR_DRVADDR_MSK;
    reg |= FUSB300_DAR_DRVADDR(addr);

    iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR);
}

/* Handle SET_ADDRESS; addresses >= 0x100 are out of the 7-bit range.
 * NOTE(review): wValue is used without le16_to_cpu() — confirm. */
static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
    if (ctrl->wValue >= 0x0100)
        request_error(fusb300);
    else {
        fusb300_set_dev_addr(fusb300, ctrl->wValue);
        fusb300_set_cxdone(fusb300);
    }
}

/* Copy a NULL-terminated array of USB descriptors into @mem, advancing
 * @mem past each copied descriptor. */
#define UVC_COPY_DESCRIPTORS(mem, src) \
    do { \
        const struct usb_descriptor_header * const *__src; \
        for (__src = src; *__src; ++__src) { \
            memcpy(mem, *__src, (*__src)->bLength); \
            mem += (*__src)->bLength; \
        } \
    } while (0)

/* Read the 8-byte SETUP packet from the CX FIFO and dispatch standard
 * requests locally.  Returns non-zero when the request must be passed
 * up to the gadget driver's ->setup() (class/vendor requests, and
 * SET_CONFIGURATION after the controller-side bookkeeping here). */
static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
    u8 *p = (u8 *)ctrl;
    u8 ret = 0;
    u8 i = 0;

    fusb300_rdcxf(fusb300, p, 8);
    fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN;
    fusb300->ep0_length = ctrl->wLength;

    /* check request */
    if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
        switch (ctrl->bRequest) {
        case USB_REQ_GET_STATUS:
            get_status(fusb300, ctrl);
            break;
        case USB_REQ_CLEAR_FEATURE:
            clear_feature(fusb300, ctrl);
            break;
        case USB_REQ_SET_FEATURE:
            set_feature(fusb300, ctrl);
            break;
        case USB_REQ_SET_ADDRESS:
            set_address(fusb300, ctrl);
            break;
        case USB_REQ_SET_CONFIGURATION:
            fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR,
                FUSB300_DAR_SETCONFG);
            /* clear sequence number */
            for (i = 1; i <= FUSB300_MAX_NUM_EP; i++)
                fusb300_clear_seqnum(fusb300, i);
            fusb300->reenum = 1;
            ret = 1;
            break;
        default:
            ret = 1;
            break;
        }
    }
/* (end of setup_packet: non-standard requests go to the gadget driver) */
else
        ret = 1;

    return ret;
}

/* Complete @req: unlink it, set its final status, and call the gadget
 * driver's completion callback with fusb300->lock dropped.  Afterwards,
 * re-arm or quiesce the endpoint's FIFO interrupt, or signal CX DONE
 * for ep0.  Caller must hold fusb300->lock. */
static void done(struct fusb300_ep *ep, struct fusb300_request *req,
    int status)
{
    list_del_init(&req->queue);

    /* don't modify queue heads during completion callback */
    if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
        req->req.status = -ESHUTDOWN;
    else
        req->req.status = status;

    spin_unlock(&ep->fusb300->lock);
    req->req.complete(&ep->ep, &req->req);
    spin_lock(&ep->fusb300->lock);

    if (ep->epnum) {
        disable_fifo_int(ep);
        if (!list_empty(&ep->queue))
            enable_fifo_int(ep);
    } else
        fusb300_set_cxdone(ep->fusb300);
}

/* Fill one PRD (physical region descriptor) for internal DMA: wait for
 * software ownership (H bit clear), program the DMA address and length,
 * set H/F/L/I flags, then mark the PRD ready.  Busy-waits on hardware. */
static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d,
        u32 len)
{
    u32 value;
    u32 reg;

    /* wait SW owner */
    do {
        reg = ioread32(ep->fusb300->reg +
            FUSB300_OFFSET_EPPRD_W0(ep->epnum));
        reg &= FUSB300_EPPRD0_H;
    } while (reg);

    iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum));

    value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H |
        FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
    iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));

    iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum));

    fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY,
        FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum));
}

/* Poll until the endpoint's PRD-done interrupt fires, then ack it.
 * Bails out (disabling the PRD interrupt) if a VBUS change or any kind
 * of reset shows up in IGR1 while waiting, since the DMA will never
 * complete in that case. */
static void fusb300_wait_idma_finished(struct fusb300_ep *ep)
{
    u32 reg;

    do {
        reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1);
        if ((reg & FUSB300_IGR1_VBUS_CHG_INT) ||
            (reg & FUSB300_IGR1_WARM_RST_INT) ||
            (reg & FUSB300_IGR1_HOT_RST_INT) ||
            (reg & FUSB300_IGR1_USBRST_INT)
        )
            goto IDMA_RESET;
        reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0);
        reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum);
    } while (!reg);

    fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0,
        FUSB300_IGR0_EPn_PRD_INT(ep->epnum));
    return;

IDMA_RESET:
    reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGER0);
    reg &= ~FUSB300_IGER0_EEPn_PRD_INT(ep->epnum);
    iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_IGER0);
}

/* Push an IN request through internal DMA: map the buffer, enable the
 * PRD interrupt, hand the PRD to hardware, wait synchronously for
 * completion, and unmap.  Returns silently if DMA mapping fails. */
static void fusb300_set_idma(struct fusb300_ep *ep,
        struct fusb300_request *req)
{
    int ret;

    ret = usb_gadget_map_request(&ep->fusb300->gadget,
            &req->req, DMA_TO_DEVICE);
    if (ret)
        return;

    fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
        FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));

    fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length);
    /* check idma is done */
    fusb300_wait_idma_finished(ep);

    usb_gadget_unmap_request(&ep->fusb300->gadget,
            &req->req, DMA_TO_DEVICE);
}

/* IN-FIFO interrupt: DMA out the head request (if non-empty) and
 * complete it.  Assumes the queue is non-empty when called. */
static void in_ep_fifo_handler(struct fusb300_ep *ep)
{
    struct fusb300_request *req = list_entry(ep->queue.next,
        struct fusb300_request, queue);

    if (req->req.length)
        fusb300_set_idma(ep, req);
    done(ep, req, 0);
}

/* OUT-FIFO interrupt: read however many bytes the FIFO byte counter
 * reports, and complete the request on a full or short packet. */
static void out_ep_fifo_handler(struct fusb300_ep *ep)
{
    struct fusb300 *fusb300 = ep->fusb300;
    struct fusb300_request *req = list_entry(ep->queue.next,
        struct fusb300_request, queue);
    u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
    u32 length = reg & FUSB300_FFR_BYCNT;

    fusb300_rdfifo(ep, req, length);

    /* finish out transfer */
    if ((req->req.length == req->req.actual) ||
        (length < ep->ep.maxpacket))
        done(ep, req, 0);
}

/* Translate the negotiated link mode from GCR into gadget.speed. */
static void check_device_mode(struct fusb300 *fusb300)
{
    u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR);

    switch (reg & FUSB300_GCR_DEVEN_MSK) {
    case FUSB300_GCR_DEVEN_SS:
        fusb300->gadget.speed = USB_SPEED_SUPER;
        break;
    case FUSB300_GCR_DEVEN_HS:
        fusb300->gadget.speed = USB_SPEED_HIGH;
        break;
    case FUSB300_GCR_DEVEN_FS:
        fusb300->gadget.speed = USB_SPEED_FULL;
        break;
    default:
        fusb300->gadget.speed = USB_SPEED_UNKNOWN;
        break;
    }
    printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK));
}

/* CX-OUT interrupt: drain the control OUT data into the queued ep0
 * request, complete it, and disable the CX-OUT interrupt again. */
static void fusb300_ep0out(struct fusb300 *fusb300)
{
    struct fusb300_ep *ep = fusb300->ep[0];
    u32 reg;

    if (!list_empty(&ep->queue)) {
        struct fusb300_request *req;

        req = list_first_entry(&ep->queue,
            struct fusb300_request, queue);
        if (req->req.length)
            fusb300_rdcxf(ep->fusb300, req->req.buf,
                req->req.length);
        done(ep, req, 0);
        reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
        reg &= ~FUSB300_IGER1_CX_OUT_INT;
        iowrite32(reg, fusb300->reg +
FUSB300_OFFSET_IGER1);
    } else
        pr_err("%s : empty queue\n", __func__);
}

/* CX-IN interrupt: feed more IN data to the CX FIFO from the queued
 * ep0 request, completing it once less than a packet remains; with no
 * request (or an OUT-direction setup) just ack the status stage. */
static void fusb300_ep0in(struct fusb300 *fusb300)
{
    struct fusb300_request *req;
    struct fusb300_ep *ep = fusb300->ep[0];

    if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) {
        req = list_entry(ep->queue.next,
            struct fusb300_request, queue);
        if (req->req.length)
            fusb300_wrcxf(ep, req);
        if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
            done(ep, req, 0);
    } else
        fusb300_set_cxdone(fusb300);
}

/* Interrupt sub-group handlers: not implemented for this controller. */
static void fusb300_grp2_handler(void)
{
}

static void fusb300_grp3_handler(void)
{
}

static void fusb300_grp4_handler(void)
{
}

static void fusb300_grp5_handler(void)
{
}

/* Main (shared) interrupt handler.  Reads and masks both interrupt
 * groups, services link/reset/power events and the ep0 state machine
 * from group 1, then walks group 0 for per-endpoint FIFO interrupts,
 * dispatching by the endpoint's programmed direction.  All work is done
 * under fusb300->lock (done() drops it around completion callbacks). */
static irqreturn_t fusb300_irq(int irq, void *_fusb300)
{
    struct fusb300 *fusb300 = _fusb300;
    u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
    u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
    u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
    u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0);
    struct usb_ctrlrequest ctrl;
    u8 in;
    u32 reg;
    int i;

    spin_lock(&fusb300->lock);

    int_grp1 &= int_grp1_en;
    int_grp0 &= int_grp0_en;

    if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_WARM_RST_INT);
        printk(KERN_INFO "fusb300_warmreset\n");
        fusb300_reset();
    }

    if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_HOT_RST_INT);
        printk(KERN_INFO "fusb300_hotreset\n");
        fusb300_reset();
    }

    if (int_grp1 & FUSB300_IGR1_USBRST_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_USBRST_INT);
        fusb300_reset();
    }
    /* COMABT_INT has a highest priority */

    if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_CX_COMABT_INT);
        printk(KERN_INFO "fusb300_ep0abt\n");
    }

    if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_VBUS_CHG_INT);
        printk(KERN_INFO "fusb300_vbus_change\n");
    }

    if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U3_EXIT_FAIL_INT);
    }

    if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U2_EXIT_FAIL_INT);
    }

    if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U1_EXIT_FAIL_INT);
    }

    if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U2_ENTRY_FAIL_INT);
    }

    if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U1_ENTRY_FAIL_INT);
    }

    if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U3_EXIT_INT);
        printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n");
    }

    if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U2_EXIT_INT);
        printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n");
    }

    if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U1_EXIT_INT);
        printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n");
    }

    if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U3_ENTRY_INT);
        printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n");
        /* complete the U3 entry handshake in hardware */
        fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1,
            FUSB300_SSCR1_GO_U3_DONE);
    }

    if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U2_ENTRY_INT);
        printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n");
    }

    if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_U1_ENTRY_INT);
        printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n");
    }

    if (int_grp1 & FUSB300_IGR1_RESM_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_RESM_INT);
        printk(KERN_INFO "fusb300_resume\n");
    }

    if (int_grp1 & FUSB300_IGR1_SUSP_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_SUSP_INT);
        printk(KERN_INFO "fusb300_suspend\n");
    }

    if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_HS_LPM_INT);
        printk(KERN_INFO "fusb300_HS_LPM_INT\n");
    }

    if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) {
        fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
            FUSB300_IGR1_DEV_MODE_CHG_INT);
        check_device_mode(fusb300);
    }

    if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) {
        fusb300_set_cxstall(fusb300);
        printk(KERN_INFO "fusb300_ep0fail\n");
    }

    if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) {
        printk(KERN_INFO "fusb300_ep0setup\n");
        /* setup_packet() returns non-zero when the request must be
         * forwarded to the gadget driver; ->setup() is called with
         * the lock dropped, and a negative result stalls ep0. */
        if (setup_packet(fusb300, &ctrl)) {
            spin_unlock(&fusb300->lock);
            if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0)
                fusb300_set_cxstall(fusb300);
            spin_lock(&fusb300->lock);
        }
    }

    if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT)
        printk(KERN_INFO "fusb300_cmdend\n");

    if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) {
        printk(KERN_INFO "fusb300_cxout\n");
        fusb300_ep0out(fusb300);
    }

    if (int_grp1 & FUSB300_IGR1_CX_IN_INT) {
        printk(KERN_INFO "fusb300_cxin\n");
        fusb300_ep0in(fusb300);
    }

    if (int_grp1 & FUSB300_IGR1_INTGRP5)
        fusb300_grp5_handler();

    if (int_grp1 & FUSB300_IGR1_INTGRP4)
        fusb300_grp4_handler();

    if (int_grp1 & FUSB300_IGR1_INTGRP3)
        fusb300_grp3_handler();

    if (int_grp1 & FUSB300_IGR1_INTGRP2)
        fusb300_grp2_handler();

    if (int_grp0) {
        /* per-endpoint FIFO interrupts (endpoint 0 handled above) */
        for (i = 1; i < FUSB300_MAX_NUM_EP; i++) {
            if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) {
                reg = ioread32(fusb300->reg +
                    FUSB300_OFFSET_EPSET1(i));
                in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0;
                if (in)
                    in_ep_fifo_handler(fusb300->ep[i]);
                else
                    out_ep_fifo_handler(fusb300->ep[i]);
            }
        }
    }

    spin_unlock(&fusb300->lock);

    return IRQ_HANDLED;
}

/* Program the U2 inactivity timeout (low byte of the TT register).
 * NOTE(review): the field macros are named SSCR2 but the write goes to
 * FUSB300_OFFSET_TT — looks intentional per the register map; confirm. */
static void fusb300_set_u2_timeout(struct fusb300 *fusb300,
    u32 time)
{
    u32 reg;

    reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
    reg &= ~0xff;
    reg |= FUSB300_SSCR2_U2TIMEOUT(time);

    iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
}

/* Program the U1 inactivity timeout (second byte of the TT register). */
static void fusb300_set_u1_timeout(struct fusb300 *fusb300,
    u32 time)
{
    u32 reg;

    reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
    reg &= ~(0xff << 8);
    reg |= FUSB300_SSCR2_U1TIMEOUT(time);

    iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
}

/* One-time controller bring-up: AHB split transfers, HS LPM permit,
 * U1/U2 timeouts, and the group-1 interrupt enable mask. */
static void init_controller(struct fusb300 *fusb300)
{
    u32 reg;
    u32 mask = 0;
    u32 val = 0;

    /* split on */
    mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON;
    reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR);
    reg &= ~mask;
    reg |= val;
    iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR);

    /* enable high-speed LPM */
    mask = val = FUSB300_HSCR_HS_LPM_PERMIT;
    reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR);
    reg &= ~mask;
    reg |= val;
    iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR);

    /* set U1/U2 timers */
    fusb300_set_u2_timeout(fusb300, 0xff);
    fusb300_set_u1_timeout(fusb300, 0xff);

    /* enable all grp1 interrupt */
    iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
}

/*------------------------------------------------------------------------*/
/* usb_gadget_ops.udc_start: register the gadget driver. */
static int fusb300_udc_start(struct usb_gadget *g,
        struct usb_gadget_driver *driver)
{
    struct fusb300 *fusb300 = to_fusb300(g);

    /* hook up the driver */
    driver->driver.bus = NULL;
    fusb300->driver = driver;

    return 0;
}

/* usb_gadget_ops.udc_stop: detach the gadget driver and re-init.
 * NOTE(review): calling driver->unbind() here is unusual — the UDC core
 * normally performs unbind itself; confirm against this kernel's
 * udc-core behavior before changing. */
static int fusb300_udc_stop(struct usb_gadget *g,
        struct usb_gadget_driver *driver)
{
    struct fusb300 *fusb300 = to_fusb300(g);

    driver->unbind(&fusb300->gadget);

    init_controller(fusb300);
    fusb300->driver = NULL;

    return 0;
}

/*--------------------------------------------------------------------------*/
/* usb_gadget_ops.pullup: soft-connect control (body in next chunk). */
static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
return 0; } static const struct usb_gadget_ops fusb300_gadget_ops = { .pullup = fusb300_udc_pullup, .udc_start = fusb300_udc_start, .udc_stop = fusb300_udc_stop, }; static int __exit fusb300_remove(struct platform_device *pdev) { struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev); usb_del_gadget_udc(&fusb300->gadget); iounmap(fusb300->reg); free_irq(platform_get_irq(pdev, 0), fusb300); fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); kfree(fusb300); return 0; } static int __init fusb300_probe(struct platform_device *pdev) { struct resource *res, *ires, *ires1; void __iomem *reg = NULL; struct fusb300 *fusb300 = NULL; struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP]; int ret = 0; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; pr_err("platform_get_resource error.\n"); goto clean_up; } ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!ires) { ret = -ENODEV; dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ error.\n"); goto clean_up; } ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!ires1) { ret = -ENODEV; dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ 1 error.\n"); goto clean_up; } reg = ioremap(res->start, resource_size(res)); if (reg == NULL) { ret = -ENOMEM; pr_err("ioremap error.\n"); goto clean_up; } /* initialize udc */ fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL); if (fusb300 == NULL) { pr_err("kzalloc error\n"); goto clean_up; } for (i = 0; i < FUSB300_MAX_NUM_EP; i++) { _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL); if (_ep[i] == NULL) { pr_err("_ep kzalloc error\n"); goto clean_up; } fusb300->ep[i] = _ep[i]; } spin_lock_init(&fusb300->lock); dev_set_drvdata(&pdev->dev, fusb300); fusb300->gadget.ops = &fusb300_gadget_ops; fusb300->gadget.max_speed = USB_SPEED_HIGH; fusb300->gadget.name = udc_name; fusb300->reg = reg; ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED, udc_name, fusb300); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); 
goto clean_up; } ret = request_irq(ires1->start, fusb300_irq, IRQF_SHARED, udc_name, fusb300); if (ret < 0) { pr_err("request_irq1 error (%d)\n", ret); goto clean_up; } INIT_LIST_HEAD(&fusb300->gadget.ep_list); for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) { struct fusb300_ep *ep = fusb300->ep[i]; if (i != 0) { INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list); list_add_tail(&fusb300->ep[i]->ep.ep_list, &fusb300->gadget.ep_list); } ep->fusb300 = fusb300; INIT_LIST_HEAD(&ep->queue); ep->ep.name = fusb300_ep_name[i]; ep->ep.ops = &fusb300_ep_ops; ep->ep.maxpacket = HS_BULK_MAX_PACKET_SIZE; } fusb300->ep[0]->ep.maxpacket = HS_CTL_MAX_PACKET_SIZE; fusb300->ep[0]->epnum = 0; fusb300->gadget.ep0 = &fusb300->ep[0]->ep; INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list); fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, GFP_KERNEL); if (fusb300->ep0_req == NULL) { ret = -ENOMEM; goto clean_up3; } init_controller(fusb300); ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget); if (ret) goto err_add_udc; dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); return 0; err_add_udc: fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); clean_up3: free_irq(ires->start, fusb300); clean_up: if (fusb300) { if (fusb300->ep0_req) fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); kfree(fusb300); } if (reg) iounmap(reg); return ret; } static struct platform_driver fusb300_driver = { .remove = __exit_p(fusb300_remove), .driver = { .name = (char *) udc_name, .owner = THIS_MODULE, }, }; module_platform_driver_probe(fusb300_driver, fusb300_probe);
gpl-2.0
ashwinr64/android_kernel_cyanogen_msm8916
drivers/net/wireless/ath/ath5k/base.c
2099
83065
/*- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/hardirq.h> #include <linux/if.h> #include <linux/io.h> #include <linux/netdevice.h> #include <linux/cache.h> #include <linux/ethtool.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/nl80211.h> #include <net/ieee80211_radiotap.h> #include <asm/unaligned.h> #include "base.h" #include "reg.h" #include "debug.h" #include "ani.h" #include "ath5k.h" #include "../regd.h" #define CREATE_TRACE_POINTS #include "trace.h" bool ath5k_modparam_nohwcrypt; module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool modparam_fastchanswitch; module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO); MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios."); static bool ath5k_modparam_no_hw_rfkill_switch; module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch, bool, S_IRUGO); MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state"); /* Module info */ MODULE_AUTHOR("Jiri Slaby"); MODULE_AUTHOR("Nick Kossifidis"); MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static int ath5k_init(struct ieee80211_hw *hw); 
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, bool skip_pcu); /* Known SREVs */ static const struct ath5k_srev_name srev_names[] = { #ifdef CONFIG_ATHEROS_AR231X { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 }, { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 }, { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 }, { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 }, { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 }, { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 }, { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 }, #else { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B }, { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 }, { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 }, { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 }, { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A }, { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 }, { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 }, { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 }, { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 }, { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 }, { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 }, { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 }, { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, #endif { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A }, { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B }, { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, { 
"2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, #ifdef CONFIG_ATHEROS_AR231X { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, #endif { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, }; static const struct ieee80211_rate ath5k_rates[] = { { .bitrate = 10, .hw_value = ATH5K_RATE_CODE_1M, }, { .bitrate = 20, .hw_value = ATH5K_RATE_CODE_2M, .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .hw_value = ATH5K_RATE_CODE_5_5M, .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .hw_value = ATH5K_RATE_CODE_11M, .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = ATH5K_RATE_CODE_6M, .flags = 0 }, { .bitrate = 90, .hw_value = ATH5K_RATE_CODE_9M, .flags = 0 }, { .bitrate = 120, .hw_value = ATH5K_RATE_CODE_12M, .flags = 0 }, { .bitrate = 180, .hw_value = ATH5K_RATE_CODE_18M, .flags = 0 }, { .bitrate = 240, .hw_value = ATH5K_RATE_CODE_24M, .flags = 0 }, { .bitrate = 360, .hw_value = ATH5K_RATE_CODE_36M, .flags = 0 }, { .bitrate = 480, .hw_value = ATH5K_RATE_CODE_48M, .flags = 0 }, { .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, .flags = 0 }, }; static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) { u64 tsf = ath5k_hw_get_tsf64(ah); if ((tsf & 0x7fff) < rstamp) tsf -= 0x8000; return (tsf & ~0x7fff) | rstamp; } const char * ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) { const char *name = "xxxxx"; unsigned int i; for (i = 0; i < ARRAY_SIZE(srev_names); i++) { if (srev_names[i].sr_type != type) continue; if ((val & 0xf0) == srev_names[i].sr_val) name = srev_names[i].sr_name; if ((val & 0xff) == srev_names[i].sr_val) { name = 
srev_names[i].sr_name; break; } } return name; } static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset) { struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; return ath5k_hw_reg_read(ah, reg_offset); } static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) { struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; ath5k_hw_reg_write(ah, val, reg_offset); } static const struct ath_ops ath5k_common_ops = { .read = ath5k_ioread32, .write = ath5k_iowrite32, }; /***********************\ * Driver Initialization * \***********************/ static void ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath5k_hw *ah = hw->priv; struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); ath_reg_notifier_apply(wiphy, request, regulatory); } /********************\ * Channel/mode setup * \********************/ /* * Returns true for the channel numbers used. */ #ifdef CONFIG_ATH5K_TEST_CHANNELS static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) { return true; } #else static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) { if (band == IEEE80211_BAND_2GHZ && chan <= 14) return true; return /* UNII 1,2 */ (((chan & 3) == 0 && chan >= 36 && chan <= 64) || /* midband */ ((chan & 3) == 0 && chan >= 100 && chan <= 140) || /* UNII-3 */ ((chan & 3) == 1 && chan >= 149 && chan <= 165) || /* 802.11j 5.030-5.080 GHz (20MHz) */ (chan == 8 || chan == 12 || chan == 16) || /* 802.11j 4.9GHz (20MHz) */ (chan == 184 || chan == 188 || chan == 192 || chan == 196)); } #endif static unsigned int ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, unsigned int mode, unsigned int max) { unsigned int count, size, freq, ch; enum ieee80211_band band; switch (mode) { case AR5K_MODE_11A: /* 1..220, but 2GHz frequencies are filtered by check_channel */ size = 220; band = IEEE80211_BAND_5GHZ; break; case AR5K_MODE_11B: case 
AR5K_MODE_11G: size = 26; band = IEEE80211_BAND_2GHZ; break; default: ATH5K_WARN(ah, "bad mode, not copying channels\n"); return 0; } count = 0; for (ch = 1; ch <= size && count < max; ch++) { freq = ieee80211_channel_to_frequency(ch, band); if (freq == 0) /* mapping failed - not a standard channel */ continue; /* Write channel info, needed for ath5k_channel_ok() */ channels[count].center_freq = freq; channels[count].band = band; channels[count].hw_value = mode; /* Check if channel is supported by the chipset */ if (!ath5k_channel_ok(ah, &channels[count])) continue; if (!ath5k_is_standard_channel(ch, band)) continue; count++; } return count; } static void ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b) { u8 i; for (i = 0; i < AR5K_MAX_RATES; i++) ah->rate_idx[b->band][i] = -1; for (i = 0; i < b->n_bitrates; i++) { ah->rate_idx[b->band][b->bitrates[i].hw_value] = i; if (b->bitrates[i].hw_value_short) ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i; } } static int ath5k_setup_bands(struct ieee80211_hw *hw) { struct ath5k_hw *ah = hw->priv; struct ieee80211_supported_band *sband; int max_c, count_c = 0; int i; BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS); max_c = ARRAY_SIZE(ah->channels); /* 2GHz band */ sband = &ah->sbands[IEEE80211_BAND_2GHZ]; sband->band = IEEE80211_BAND_2GHZ; sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0]; if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) { /* G mode */ memcpy(sband->bitrates, &ath5k_rates[0], sizeof(struct ieee80211_rate) * 12); sband->n_bitrates = 12; sband->channels = ah->channels; sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11G, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) { /* B mode */ memcpy(sband->bitrates, &ath5k_rates[0], sizeof(struct ieee80211_rate) * 4); sband->n_bitrates = 4; /* 5211 only supports B 
rates and uses 4bit rate codes * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B) * fix them up here: */ if (ah->ah_version == AR5K_AR5211) { for (i = 0; i < 4; i++) { sband->bitrates[i].hw_value = sband->bitrates[i].hw_value & 0xF; sband->bitrates[i].hw_value_short = sband->bitrates[i].hw_value_short & 0xF; } } sband->channels = ah->channels; sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11B, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } ath5k_setup_rate_idx(ah, sband); /* 5GHz band, A mode */ if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) { sband = &ah->sbands[IEEE80211_BAND_5GHZ]; sband->band = IEEE80211_BAND_5GHZ; sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0]; memcpy(sband->bitrates, &ath5k_rates[4], sizeof(struct ieee80211_rate) * 8); sband->n_bitrates = 8; sband->channels = &ah->channels[count_c]; sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11A, max_c); hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; } ath5k_setup_rate_idx(ah, sband); ath5k_debug_dump_bands(ah); return 0; } /* * Set/change channels. We always reset the chip. * To accomplish this we must first cleanup any pending DMA, * then restart stuff after a la ath5k_init. * * Called with ah->lock. */ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan) { ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "channel set, resetting (%u -> %u MHz)\n", ah->curchan->center_freq, chan->center_freq); /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. 
 */
	return ath5k_reset(ah, chan, true);
}

/*
 * ath5k_vif_iter() - Interface iterator callback that accumulates
 * combined state over all active interfaces into @data (a
 * struct ath5k_vif_iter_data): the common BSSID mask, the first active
 * MAC address, the STA interface count and the combined opmode.
 *
 * @data: struct ath5k_vif_iter_data accumulator (cast from void *)
 * @mac: MAC address of the interface currently being visited
 * @vif: the mac80211 virtual interface being visited
 */
void
ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	/* Clear every mask bit where this interface's address differs
	 * from the reference hardware address - this builds the BSSID
	 * mask common to all active interfaces. */
	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	/* Remember the address of the first active interface found */
	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	/* If some interface already uses the hardware MAC address there
	 * is no need to reprogram the lladdr afterwards */
	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (ether_addr_equal(iter_data->hw_macaddr, mac))
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

/*
 * ath5k_update_bssid_mask_and_opmode() - Recompute and program the
 * BSSID mask, operating mode, hardware address and RX filter from the
 * current set of active interfaces.
 *
 * @ah: the &struct ath5k_hw
 * @vif: an extra interface to fold in (may not yet be marked active),
 *	 or NULL
 */
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	/* Fold in @vif first - it may not be in the active list yet */
	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(
		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath5k_vif_iter, &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

/*
 * ath5k_hw_to_driver_rix() - Map a hardware rate code to the driver's
 * rate index for the current band.  Warns and returns 0 (the base
 * rate) when the hardware code is out of range or unmapped.
 */
static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

/*
 * ath5k_rx_skb_alloc() - Allocate an RX skb and DMA-map it for the
 * device.  On success the mapped DMA address is stored through
 * @skb_addr; returns NULL on allocation or mapping failure.
 */
static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah,
			"can't alloc skbuff of size %u\n",
			common->rx_bufsize);
		return NULL;
	}

	/* Map for device DMA; the buffer is written by hardware */
	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

/*
 * ath5k_rxbuf_setup() - Initialize an RX buffer's descriptor and link
 * it onto the end of the self-linked RX descriptor chain.  Allocates a
 * fresh skb if the buffer has none.  Returns 0 on success or -ENOMEM.
 */
static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
*/ ds = bf->desc; ds->ds_link = bf->daddr; /* link to self */ ds->ds_data = bf->skbaddr; ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); if (ret) { ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__); return ret; } if (ah->rxlink != NULL) *ah->rxlink = bf->daddr; ah->rxlink = &ds->ds_link; return 0; } static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb) { struct ieee80211_hdr *hdr; enum ath5k_pkt_type htype; __le16 fc; hdr = (struct ieee80211_hdr *)skb->data; fc = hdr->frame_control; if (ieee80211_is_beacon(fc)) htype = AR5K_PKT_TYPE_BEACON; else if (ieee80211_is_probe_resp(fc)) htype = AR5K_PKT_TYPE_PROBE_RESP; else if (ieee80211_is_atim(fc)) htype = AR5K_PKT_TYPE_ATIM; else if (ieee80211_is_pspoll(fc)) htype = AR5K_PKT_TYPE_PSPOLL; else htype = AR5K_PKT_TYPE_NORMAL; return htype; } static int ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, struct ath5k_txq *txq, int padsize) { struct ath5k_desc *ds = bf->desc; struct sk_buff *skb = bf->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; struct ieee80211_rate *rate; unsigned int mrr_rate[3], mrr_tries[3]; int i, ret; u16 hw_rate; u16 cts_rate = 0; u16 duration = 0; u8 rc_flags; flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; /* XXX endianness */ bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len, DMA_TO_DEVICE); rate = ieee80211_get_tx_rate(ah->hw, info); if (!rate) { ret = -EINVAL; goto err_unmap; } if (info->flags & IEEE80211_TX_CTL_NO_ACK) flags |= AR5K_TXDESC_NOACK; rc_flags = info->control.rates[0].flags; hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? 
rate->hw_value_short : rate->hw_value; pktlen = skb->len; /* FIXME: If we are in g mode and rate is a CCK rate * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta * from tx power (value is in dB units already) */ if (info->control.hw_key) { keyidx = info->control.hw_key->hw_key_idx; pktlen += info->control.hw_key->icv_len; } if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { flags |= AR5K_TXDESC_RTSENA; cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value; duration = le16_to_cpu(ieee80211_rts_duration(ah->hw, info->control.vif, pktlen, info)); } if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { flags |= AR5K_TXDESC_CTSENA; cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value; duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw, info->control.vif, pktlen, info)); } ret = ah->ah_setup_tx_desc(ah, ds, pktlen, ieee80211_get_hdrlen_from_skb(skb), padsize, get_hw_packet_type(skb), (ah->ah_txpower.txp_requested * 2), hw_rate, info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, cts_rate, duration); if (ret) goto err_unmap; /* Set up MRR descriptor */ if (ah->ah_capabilities.cap_has_mrr_support) { memset(mrr_rate, 0, sizeof(mrr_rate)); memset(mrr_tries, 0, sizeof(mrr_tries)); for (i = 0; i < 3; i++) { rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); if (!rate) break; mrr_rate[i] = rate->hw_value; mrr_tries[i] = info->control.rates[i + 1].count; } ath5k_hw_setup_mrr_tx_desc(ah, ds, mrr_rate[0], mrr_tries[0], mrr_rate[1], mrr_tries[1], mrr_rate[2], mrr_tries[2]); } ds->ds_link = 0; ds->ds_data = bf->skbaddr; spin_lock_bh(&txq->lock); list_add_tail(&bf->list, &txq->q); txq->txq_len++; if (txq->link == NULL) /* is this first packet? 
*/ ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); else /* no, so only link it */ *txq->link = bf->daddr; txq->link = &ds->ds_link; ath5k_hw_start_tx_dma(ah, txq->qnum); mmiowb(); spin_unlock_bh(&txq->lock); return 0; err_unmap: dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE); return ret; } /*******************\ * Descriptors setup * \*******************/ static int ath5k_desc_alloc(struct ath5k_hw *ah) { struct ath5k_desc *ds; struct ath5k_buf *bf; dma_addr_t da; unsigned int i; int ret; /* allocate descriptors */ ah->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len, &ah->desc_daddr, GFP_KERNEL); if (ah->desc == NULL) { ATH5K_ERR(ah, "can't allocate descriptors\n"); ret = -ENOMEM; goto err; } ds = ah->desc; da = ah->desc_daddr; ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n", ds, ah->desc_len, (unsigned long long)ah->desc_daddr); bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, sizeof(struct ath5k_buf), GFP_KERNEL); if (bf == NULL) { ATH5K_ERR(ah, "can't allocate bufptr\n"); ret = -ENOMEM; goto err_free; } ah->bufptr = bf; INIT_LIST_HEAD(&ah->rxbuf); for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { bf->desc = ds; bf->daddr = da; list_add_tail(&bf->list, &ah->rxbuf); } INIT_LIST_HEAD(&ah->txbuf); ah->txbuf_len = ATH_TXBUF; for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) { bf->desc = ds; bf->daddr = da; list_add_tail(&bf->list, &ah->txbuf); } /* beacon buffers */ INIT_LIST_HEAD(&ah->bcbuf); for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) { bf->desc = ds; bf->daddr = da; list_add_tail(&bf->list, &ah->bcbuf); } return 0; err_free: dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr); err: ah->desc = NULL; return ret; } void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf) { BUG_ON(!bf); if (!bf->skb) return; dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len, DMA_TO_DEVICE); 
ieee80211_free_txskb(ah->hw, bf->skb); bf->skb = NULL; bf->skbaddr = 0; bf->desc->ds_data = 0; } void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf) { struct ath_common *common = ath5k_hw_common(ah); BUG_ON(!bf); if (!bf->skb) return; dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize, DMA_FROM_DEVICE); dev_kfree_skb_any(bf->skb); bf->skb = NULL; bf->skbaddr = 0; bf->desc->ds_data = 0; } static void ath5k_desc_free(struct ath5k_hw *ah) { struct ath5k_buf *bf; list_for_each_entry(bf, &ah->txbuf, list) ath5k_txbuf_free_skb(ah, bf); list_for_each_entry(bf, &ah->rxbuf, list) ath5k_rxbuf_free_skb(ah, bf); list_for_each_entry(bf, &ah->bcbuf, list) ath5k_txbuf_free_skb(ah, bf); /* Free memory associated with all descriptors */ dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr); ah->desc = NULL; ah->desc_daddr = 0; kfree(ah->bufptr); ah->bufptr = NULL; } /**************\ * Queues setup * \**************/ static struct ath5k_txq * ath5k_txq_setup(struct ath5k_hw *ah, int qtype, int subtype) { struct ath5k_txq *txq; struct ath5k_txq_info qi = { .tqi_subtype = subtype, /* XXX: default values not correct for B and XR channels, * but who cares? */ .tqi_aifs = AR5K_TUNE_AIFS, .tqi_cw_min = AR5K_TUNE_CWMIN, .tqi_cw_max = AR5K_TUNE_CWMAX }; int qnum; /* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise we wait for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may backup * due to a lack of tx descriptors. 
*/ qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | AR5K_TXQ_FLAG_TXDESCINT_ENABLE; qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); if (qnum < 0) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return ERR_PTR(qnum); } txq = &ah->txqs[qnum]; if (!txq->setup) { txq->qnum = qnum; txq->link = NULL; INIT_LIST_HEAD(&txq->q); spin_lock_init(&txq->lock); txq->setup = true; txq->txq_len = 0; txq->txq_max = ATH5K_TXQ_LEN_MAX; txq->txq_poll_mark = false; txq->txq_stuck = 0; } return &ah->txqs[qnum]; } static int ath5k_beaconq_setup(struct ath5k_hw *ah) { struct ath5k_txq_info qi = { /* XXX: default values not correct for B and XR channels, * but who cares? */ .tqi_aifs = AR5K_TUNE_AIFS, .tqi_cw_min = AR5K_TUNE_CWMIN, .tqi_cw_max = AR5K_TUNE_CWMAX, /* NB: for dynamic turbo, don't enable any other interrupts */ .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE }; return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); } static int ath5k_beaconq_config(struct ath5k_hw *ah) { struct ath5k_txq_info qi; int ret; ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi); if (ret) goto err; if (ah->opmode == NL80211_IFTYPE_AP || ah->opmode == NL80211_IFTYPE_MESH_POINT) { /* * Always burst out beacon and CAB traffic * (aifs = cwmin = cwmax = 0) */ qi.tqi_aifs = 0; qi.tqi_cw_min = 0; qi.tqi_cw_max = 0; } else if (ah->opmode == NL80211_IFTYPE_ADHOC) { /* * Adhoc mode; backoff between 0 and (2 * cw_min). 
*/ qi.tqi_aifs = 0; qi.tqi_cw_min = 0; qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN; } ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi); if (ret) { ATH5K_ERR(ah, "%s: unable to update parameters for beacon " "hardware queue!\n", __func__); goto err; } ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */ if (ret) goto err; /* reconfigure cabq with ready time to 80% of beacon_interval */ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); if (ret) goto err; qi.tqi_ready_time = (ah->bintval * 80) / 100; ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); if (ret) goto err; ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB); err: return ret; } /** * ath5k_drain_tx_buffs - Empty tx buffers * * @ah The &struct ath5k_hw * * Empty tx buffers from all queues in preparation * of a reset or during shutdown. * * NB: this assumes output has been stopped and * we do not need to block ath5k_tx_tasklet */ static void ath5k_drain_tx_buffs(struct ath5k_hw *ah) { struct ath5k_txq *txq; struct ath5k_buf *bf, *bf0; int i; for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { if (ah->txqs[i].setup) { txq = &ah->txqs[i]; spin_lock_bh(&txq->lock); list_for_each_entry_safe(bf, bf0, &txq->q, list) { ath5k_debug_printtxbuf(ah, bf); ath5k_txbuf_free_skb(ah, bf); spin_lock(&ah->txbuflock); list_move_tail(&bf->list, &ah->txbuf); ah->txbuf_len++; txq->txq_len--; spin_unlock(&ah->txbuflock); } txq->link = NULL; txq->txq_poll_mark = false; spin_unlock_bh(&txq->lock); } } } static void ath5k_txq_release(struct ath5k_hw *ah) { struct ath5k_txq *txq = ah->txqs; unsigned int i; for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++) if (txq->setup) { ath5k_hw_release_tx_queue(ah, txq->qnum); txq->setup = false; } } /*************\ * RX Handling * \*************/ /* * Enable the receive h/w following a reset. 
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	/* Round the RX buffer size up to a cache-line multiple */
	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	/* (Re)build the RX descriptor chain; abort on the first buffer
	 * that cannot be set up. */
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	/* Point the hardware RX descriptor pointer at the head buffer */
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

/*
 * ath5k_rx_decrypted() - Decide whether a received frame was decrypted
 * by the hardware; returns RX_FLAG_DECRYPTED or 0 based on the RX
 * status and, when needed, the key index carried in the frame itself.
 */
static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet.
*/ hlen = ieee80211_hdrlen(hdr->frame_control); if (ieee80211_has_protected(hdr->frame_control) && !(rs->rs_status & AR5K_RXERR_DECRYPT) && skb->len >= hlen + 4) { keyix = skb->data[hlen + 3] >> 6; if (test_bit(keyix, common->keymap)) return RX_FLAG_DECRYPTED; } return 0; } static void ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb, struct ieee80211_rx_status *rxs) { struct ath_common *common = ath5k_hw_common(ah); u64 tsf, bc_tstamp; u32 hw_tu; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; if (ieee80211_is_beacon(mgmt->frame_control) && le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && ether_addr_equal(mgmt->bssid, common->curbssid)) { /* * Received an IBSS beacon with the same BSSID. Hardware *must* * have updated the local TSF. We have to work around various * hardware bugs, though... */ tsf = ath5k_hw_get_tsf64(ah); bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp); hw_tu = TSF_TO_TU(tsf); ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "beacon %llx mactime %llx (diff %lld) tsf now %llx\n", (unsigned long long)bc_tstamp, (unsigned long long)rxs->mactime, (unsigned long long)(rxs->mactime - bc_tstamp), (unsigned long long)tsf); /* * Sometimes the HW will give us a wrong tstamp in the rx * status, causing the timestamp extension to go wrong. * (This seems to happen especially with beacon frames bigger * than 78 byte (incl. FCS)) * But we know that the receive timestamp must be later than the * timestamp of the beacon since HW must have synced to that. * * NOTE: here we assume mactime to be after the frame was * received, not like mac80211 which defines it at the start. */ if (bc_tstamp > rxs->mactime) { ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "fixing mactime from %llx to %llx\n", (unsigned long long)rxs->mactime, (unsigned long long)tsf); rxs->mactime = tsf; } /* * Local TSF might have moved higher than our beacon timers, * in that case we have to update them to continue sending * beacons. 
This also takes care of synchronizing beacon sending * times with other stations. */ if (hw_tu >= ah->nexttbtt) ath5k_beacon_update_timers(ah, bc_tstamp); /* Check if the beacon timers are still correct, because a TSF * update might have created a window between them - for a * longer description see the comment of this function: */ if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) { ath5k_beacon_update_timers(ah, bc_tstamp); ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "fixed beacon timers after beacon receive\n"); } } } static void ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; struct ath_common *common = ath5k_hw_common(ah); /* only beacons from our BSSID */ if (!ieee80211_is_beacon(mgmt->frame_control) || !ether_addr_equal(mgmt->bssid, common->curbssid)) return; ewma_add(&ah->ah_beacon_rssi_avg, rssi); /* in IBSS mode we should keep RSSI statistics per neighbour */ /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */ } /* * Compute padding position. skb must contain an IEEE 802.11 frame */ static int ath5k_common_padpos(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 frame_control = hdr->frame_control; int padpos = 24; if (ieee80211_has_a4(frame_control)) padpos += ETH_ALEN; if (ieee80211_is_data_qos(frame_control)) padpos += IEEE80211_QOS_CTL_LEN; return padpos; } /* * This function expects an 802.11 frame and returns the number of * bytes added, or -1 if we don't have enough header room. */ static int ath5k_add_padding(struct sk_buff *skb) { int padpos = ath5k_common_padpos(skb); int padsize = padpos & 3; if (padsize && skb->len > padpos) { if (skb_headroom(skb) < padsize) return -1; skb_push(skb, padsize); memmove(skb->data, skb->data + padsize, padpos); return padsize; } return 0; } /* * The MAC header is padded to have 32-bit boundary if the * packet payload is non-zero. 
The general calculation for * padsize would take into account odd header lengths: * padsize = 4 - (hdrlen & 3); however, since only * even-length headers are used, padding can only be 0 or 2 * bytes and we can optimize this a bit. We must not try to * remove padding from short control frames that do not have a * payload. * * This function expects an 802.11 frame and returns the number of * bytes removed. */ static int ath5k_remove_padding(struct sk_buff *skb) { int padpos = ath5k_common_padpos(skb); int padsize = padpos & 3; if (padsize && skb->len >= padpos + padsize) { memmove(skb->data + padsize, skb->data, padpos); skb_pull(skb, padsize); return padsize; } return 0; } static void ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, struct ath5k_rx_status *rs) { struct ieee80211_rx_status *rxs; ath5k_remove_padding(skb); rxs = IEEE80211_SKB_RXCB(skb); rxs->flag = 0; if (unlikely(rs->rs_status & AR5K_RXERR_MIC)) rxs->flag |= RX_FLAG_MMIC_ERROR; /* * always extend the mac timestamp, since this information is * also needed for proper IBSS merging. * * XXX: it might be too late to do it here, since rs_tstamp is * 15bit only. that means TSF extension has to be done within * 32768usec (about 32ms). it might be necessary to move this to * the interrupt handler, like it is done in madwifi. 
*/ rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp); rxs->flag |= RX_FLAG_MACTIME_END; rxs->freq = ah->curchan->center_freq; rxs->band = ah->curchan->band; rxs->signal = ah->ah_noise_floor + rs->rs_rssi; rxs->antenna = rs->rs_antenna; if (rs->rs_antenna > 0 && rs->rs_antenna < 5) ah->stats.antenna_rx[rs->rs_antenna]++; else ah->stats.antenna_rx[0]++; /* invalid */ rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate); rxs->flag |= ath5k_rx_decrypted(ah, skb, rs); if (rxs->rate_idx >= 0 && rs->rs_rate == ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) rxs->flag |= RX_FLAG_SHORTPRE; trace_ath5k_rx(ah, skb); ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi); /* check beacons in IBSS mode */ if (ah->opmode == NL80211_IFTYPE_ADHOC) ath5k_check_ibss_tsf(ah, skb, rxs); ieee80211_rx(ah->hw, skb); } /** ath5k_frame_receive_ok() - Do we want to receive this frame or not? * * Check if we want to further process this frame or not. Also update * statistics. Return true if we want this frame, false if not. */ static bool ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs) { ah->stats.rx_all_count++; ah->stats.rx_bytes_count += rs->rs_datalen; if (unlikely(rs->rs_status)) { if (rs->rs_status & AR5K_RXERR_CRC) ah->stats.rxerr_crc++; if (rs->rs_status & AR5K_RXERR_FIFO) ah->stats.rxerr_fifo++; if (rs->rs_status & AR5K_RXERR_PHY) { ah->stats.rxerr_phy++; if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32) ah->stats.rxerr_phy_code[rs->rs_phyerr]++; return false; } if (rs->rs_status & AR5K_RXERR_DECRYPT) { /* * Decrypt error. If the error occurred * because there was no hardware key, then * let the frame through so the upper layers * can process it. This is necessary for 5210 * parts which have no way to setup a ``clear'' * key cache entry. 
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		/* Jumbo (multi-descriptor) frames are not supported; drop */
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

/*
 * Recompute the interrupt mask under irqlock: while an RX or TX tasklet
 * is pending, the corresponding hardware interrupts are masked so the
 * tasklet is not re-triggered before it has run.
 */
static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

/*
 * RX tasklet: walk the RX buffer list, process each completed descriptor,
 * hand good frames to ath5k_receive_frame() and re-arm the buffer with a
 * freshly allocated skb. Stops when the hardware is still using the
 * current (self-linked) descriptor or the descriptor is still in progress.
 */
static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}


/*************\
* TX Handling *
\*************/

/*
 * Queue one frame for transmission on the given hardware queue: pad the
 * 802.11 header to a 4-byte boundary, take a tx buffer from the free
 * list and set up its descriptor. Stops mac80211 queues when the
 * hardware queue or the buffer pool runs full; frames that cannot be
 * queued are freed back to mac80211.
 */
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		/* Descriptor setup failed: return the buffer to the pool */
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	ieee80211_free_txskb(hw, skb);
}

/*
 * Report a completed transmission back to mac80211: translate the
 * hardware tx status into ieee80211_tx_info rate/retry counts and
 * flags, strip the MAC header padding again and update tx statistics.
 */
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	/* Save retry counts before clearing the status area of the info */
	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}
	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}

/*
 * Reap completed frames from one hardware tx queue: process each
 * finished descriptor, unmap its DMA buffer, report the status to
 * mac80211 and return the buffer to the free pool. Wakes the mac80211
 * queue again once the queue level drops low enough.
 */
static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

/*
 * TX tasklet: run ath5k_tx_processq() on every set-up queue that the
 * ISR flagged as having completed frames, then re-enable tx interrupts.
 */
static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON,
			(ah->ah_txpower.txp_requested * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	avf = (void *)vif->drv_priv;
	/* Free any previous beacon skb before installing the new one */
	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			ah->bmisscount);
		ah->bmisscount = 0;
	}

	/*
	 * With multiple beaconing interfaces (staggered AP/mesh beacons)
	 * pick the slot whose turn it is based on the current TSF.
	 */
	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
			ah->num_mesh_vifs > 1) ||
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

	/* Flush buffered multicast/broadcast frames onto the CAB queue */
	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when an new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = ah->bintval & AR5K_BEACON_PERIOD;
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
		+ ah->num_mesh_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	ah->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @ah: struct ath5k_hw pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
	spin_lock_bh(&ah->block);
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (ah->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(ah);

		ah->imask |= AR5K_INT_SWBA;

		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(ah);
		} else
			ath5k_beacon_update_timers(ah, -1);
	} else {
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
	}

	ath5k_hw_set_imr(ah, ah->imask);
	mmiowb();
	spin_unlock_bh(&ah->block);
}

static void
ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				ah->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
	}
}


/********************\
* Interrupt handling *
\********************/

/*
 * Decide from interrupt context whether it is time to schedule ANI
 * or a short/full calibration; the actual work runs in a tasklet or
 * workqueue, never here.
 */
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run ANI only when calibration is not active */

		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run calibration only when another calibration
		 * is not running.
		 *
		 * Note: This is for both full/short calibration,
		 * if it's time for a full one, ath5k_calibrate_work will deal
		 * with it.
		 */

		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		ieee80211_queue_work(ah->hw, &ah->calib_work);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

/* Mark RX work pending and kick the RX tasklet */
static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
}

/* Mark TX work pending and kick the TX tasklet */
static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
}

/*
 * Main interrupt handler: read and clear the ISR, handle fatal errors
 * and RX overruns by scheduling a reset, and dispatch everything else
 * to the RX/TX/beacon/ANI tasklets.
 */
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;
	unsigned int counter = 1000;


	/*
	 * If hw is not ready (or detached) and we get an
	 * interrupt, or if we have no interrupts pending
	 * (that means it's not for us) skip it.
	 *
	 * NOTE: Group 0/1 PCI interface registers are not
	 * supported on WiSOCs, so we can't check for pending
	 * interrupts (ISR belongs to another register group
	 * so we are ok).
	 */
	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
			((ath5k_get_bus_type(ah) != ATH_AHB) &&
			!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	/** Main loop **/
	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */

		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);

		/*
		 * Fatal hw error -> Log and reset
		 *
		 * Fatal errors are unrecoverable so we have to
		 * reset the card. These errors include bus and
		 * dma errors.
		 */
		if (unlikely(status & AR5K_INT_FATAL)) {

			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);

		/*
		 * RX Overrun -> Count and reset if needed
		 *
		 * Receive buffers are full. Either the bus is busy or
		 * the CPU is not fast enough to process all received
		 * frames.
		 */
		} else if (unlikely(status & AR5K_INT_RXORN)) {

			/*
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset
			 * this guess is copied from the HAL.
			 */
			ah->stats.rxorn_intr++;

			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(ah->hw, &ah->reset_work);
			} else
				ath5k_schedule_rx(ah);

		} else {

			/* Software Beacon Alert -> Schedule beacon tasklet */
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&ah->beacontq);

			/*
			 * No more RX descriptors -> Just count
			 *
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			if (status & AR5K_INT_RXEOL)
				ah->stats.rxeol_intr++;


			/* TX Underrun -> Bump tx trigger level */
			if (status & AR5K_INT_TXURN)
				ath5k_hw_update_tx_triglevel(ah, true);

			/* RX -> Schedule rx tasklet */
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(ah);

			/* TX -> Schedule tx tasklet */
			if (status & (AR5K_INT_TXOK
					| AR5K_INT_TXDESC
					| AR5K_INT_TXERR
					| AR5K_INT_TXEOL))
				ath5k_schedule_tx(ah);

			/* Missed beacon -> TODO
			if (status & AR5K_INT_BMISS)
			*/

			/* MIB event -> Update counters and notify ANI */
			if (status & AR5K_INT_MIB) {
				ah->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}

			/* GPIO -> Notify RFKill layer */
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&ah->rf_kill.toggleq);

		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	/*
	 * Until we handle rx/tx interrupts mask them on IMR
	 *
	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
	 * and unset after we 've handled the interrupts.
	 */
	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);

	if (unlikely(!counter))
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");

	/* Fire up calibration poll */
	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_calibrate_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		calib_work);

	/* Should we run a full calibration ? */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {

		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

		ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
				"running full calibration\n");

		if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					"got new rfgain, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
	} else
		ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;


	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);

	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				ah->curchan->center_freq));

	/* Clear calibration flags */
	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
	else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}

/* ANI (adaptive noise immunity) tasklet: run one calibration pass */
static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}

/*
 * Periodic watchdog: detect tx queues that have frames pending but made
 * no progress since the last poll (txq_poll_mark still set) and reset
 * the hardware if any queue is stuck.
 */
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	if (!test_bit(ATH_STAT_STARTED, ah->status))
		return;

	mutex_lock(&ah->lock);

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(ah, NULL, true);
	}

	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/

/* Interface-combination limits advertised to cfg80211 */
static const struct ieee80211_iface_limit if_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 4,	.types =
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination if_comb = {
	.limits = if_limits,
	.n_limits = ARRAY_SIZE(if_limits),
	.max_interfaces = 2048,
	.num_different_channels = 1,
};

/*
 * One-time device/driver bring-up: register capabilities with mac80211,
 * initialize locks and the interrupt handler, initialize the hardware
 * layer and report the detected MAC/PHY/radio revisions.
 */
int
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = ah->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, ah->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_MFP_CAPABLE |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->iface_combinations = &if_comb;
	hw->wiphy->n_iface_combinations = 1;

	/* SW support for IBSS_RSN is provided by mac80211 */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, ah->status);

	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
	if (ret) {
		ATH5K_ERR(ah, "request_irq failed\n");
		goto err;
	}

	common = ath5k_hw_common(ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = hw;
	common->priv = ah;
	common->clockrate = 40;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(ah);
	if (ret)
		goto err_irq;

	/* Set up multi-rate retry capabilities */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);

	if (!ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(ah, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(ah);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, ah->status);

	return 0;
err_ah:
	ath5k_hw_deinit(ah);
err_irq:
	free_irq(ah->irq, ah);
err:
	return ret;
}

/*
 * Quiesce the hardware under ah->lock: stop queues, mask interrupts,
 * stop RX/TX DMA and power the PHY down. Safe to call whether or not
 * the hardware is still present (ATH_STAT_INVALID).
 */
static int
ath5k_stop_locked(struct ath5k_hw *ah)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(ah->hw);

	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(ah);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

/*
 * mac80211 start callback: reset the chip, clear the key cache,
 * enable interrupts and kick off the tx-watchdog work.
 */
int
ath5k_start(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&ah->lock);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * no matter this is the first time through or not.
	 */
	ath5k_stop_locked(ah);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ah->curchan = ah->hw->conf.chandef.chan;
	ah->imask = AR5K_INT_RXOK
		| AR5K_INT_RXERR
		| AR5K_INT_RXEOL
		| AR5K_INT_RXORN
		| AR5K_INT_TXDESC
		| AR5K_INT_TXEOL
		| AR5K_INT_FATAL
		| AR5K_INT_GLOBAL
		| AR5K_INT_MIB;

	ret = ath5k_reset(ah, NULL, false);
	if (ret)
		goto done;

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&ah->lock);

	set_bit(ATH_STAT_STARTED, ah->status);
	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

/* Clear pending flags and kill all driver tasklets */
static void
ath5k_stop_tasklets(struct ath5k_hw *ah)
{
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
void
ath5k_stop(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	int ret;

	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in a weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset)
		 */
		ret = ath5k_hw_on_hold(ah);

		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&ah->lock);

	ath5k_stop_tasklets(ah);

	clear_bit(ATH_STAT_STARTED, ah->status);
	cancel_delayed_work_sync(&ah->tx_complete_work);

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_stop(ah);
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with ah->lock.
 */
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);
	if (chan)
		ah->curchan = chan;

	/* Fast channel change is only possible when switching channel,
	 * not on a plain re-reset of the current one */
	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	/*
	 * Set calibration intervals
	 *
	 * Note: We don't need to run calibration immediately
	 * since some initial calibration is done on reset
	 * even for fast channel switching. Also on scanning
	 * this will get set again and again and it won't get
	 * executed unless we connect somewhere and spend some
	 * time on the channel (that's what calibration needs
	 * anyway to be accurate).
	 */
	ah->ah_cal_next_full = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
	ah->ah_cal_next_ani = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);

	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&ah->survey, 0, sizeof(ah->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(ah, c); */

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(ah->hw);

	return 0;
err:
	return ret;
}

/* Worker that performs a full hardware reset (scheduled from IRQ paths) */
static void
ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		reset_work);

	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
}

/*
 * Driver-level initialization called from ath5k_init_ah(): register
 * bands/channels, allocate descriptors and create the tx queues.
 * NOTE(review): this function continues past the end of this chunk.
 */
static int
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;


	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
*/ ret = ath5k_beaconq_setup(ah); if (ret < 0) { ATH5K_ERR(ah, "can't setup a beacon xmit queue\n"); goto err_desc; } ah->bhalq = ret; ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0); if (IS_ERR(ah->cabq)) { ATH5K_ERR(ah, "can't setup cab queue\n"); ret = PTR_ERR(ah->cabq); goto err_bhal; } /* 5211 and 5212 usually support 10 queues but we better rely on the * capability information */ if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) { /* This order matches mac80211's queue priority, so we can * directly use the mac80211 queue number without any mapping */ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO); if (IS_ERR(txq)) { ATH5K_ERR(ah, "can't setup xmit queue\n"); ret = PTR_ERR(txq); goto err_queues; } txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI); if (IS_ERR(txq)) { ATH5K_ERR(ah, "can't setup xmit queue\n"); ret = PTR_ERR(txq); goto err_queues; } txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); if (IS_ERR(txq)) { ATH5K_ERR(ah, "can't setup xmit queue\n"); ret = PTR_ERR(txq); goto err_queues; } txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); if (IS_ERR(txq)) { ATH5K_ERR(ah, "can't setup xmit queue\n"); ret = PTR_ERR(txq); goto err_queues; } hw->queues = 4; } else { /* older hardware (5210) can only support one data queue */ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); if (IS_ERR(txq)) { ATH5K_ERR(ah, "can't setup xmit queue\n"); ret = PTR_ERR(txq); goto err_queues; } hw->queues = 1; } tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah); tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah); tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah); tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah); INIT_WORK(&ah->reset_work, ath5k_reset_work); INIT_WORK(&ah->calib_work, ath5k_calibrate_work); INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work); ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac); if (ret) { 
ATH5K_ERR(ah, "unable to read address from EEPROM\n"); goto err_queues; } SET_IEEE80211_PERM_ADDR(hw, mac); /* All MAC address bits matter for ACKs */ ath5k_update_bssid_mask_and_opmode(ah, NULL); regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier); if (ret) { ATH5K_ERR(ah, "can't initialize regulatory system\n"); goto err_queues; } ret = ieee80211_register_hw(hw); if (ret) { ATH5K_ERR(ah, "can't register ieee80211 hw\n"); goto err_queues; } if (!ath_is_world_regd(regulatory)) regulatory_hint(hw->wiphy, regulatory->alpha2); ath5k_init_leds(ah); ath5k_sysfs_register(ah); return 0; err_queues: ath5k_txq_release(ah); err_bhal: ath5k_hw_release_tx_queue(ah, ah->bhalq); err_desc: ath5k_desc_free(ah); err: return ret; } void ath5k_deinit_ah(struct ath5k_hw *ah) { struct ieee80211_hw *hw = ah->hw; /* * NB: the order of these is important: * o call the 802.11 layer before detaching ath5k_hw to * ensure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * XXX: ??? detach ath5k_hw ??? * Other than that, it's straightforward... */ ieee80211_unregister_hw(hw); ath5k_desc_free(ah); ath5k_txq_release(ah); ath5k_hw_release_tx_queue(ah, ah->bhalq); ath5k_unregister_leds(ah); ath5k_sysfs_unregister(ah); /* * NB: can't reclaim these until after ieee80211_ifdetach * returns because we'll get called back to reclaim node * state and potentially want to use them. 
*/ ath5k_hw_deinit(ah); free_irq(ah->irq, ah); } bool ath5k_any_vif_assoc(struct ath5k_hw *ah) { struct ath5k_vif_iter_data iter_data; iter_data.hw_macaddr = NULL; iter_data.any_assoc = false; iter_data.need_set_hw_addr = false; iter_data.found_active = true; ieee80211_iterate_active_interfaces_atomic( ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL, ath5k_vif_iter, &iter_data); return iter_data.any_assoc; } void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable) { struct ath5k_hw *ah = hw->priv; u32 rfilt; rfilt = ath5k_hw_get_rx_filter(ah); if (enable) rfilt |= AR5K_RX_FILTER_BEACON; else rfilt &= ~AR5K_RX_FILTER_BEACON; ath5k_hw_set_rx_filter(ah, rfilt); ah->filter_flags = rfilt; } void _ath5k_printk(const struct ath5k_hw *ah, const char *level, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (ah && ah->hw) printk("%s" pr_fmt("%s: %pV"), level, wiphy_name(ah->hw->wiphy), &vaf); else printk("%s" pr_fmt("%pV"), level, &vaf); va_end(args); }
gpl-2.0
szyusong/linux-at91
arch/tile/gxio/usb_host.c
2355
2060
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of USB gxio calls: a thin shim that opens the TILE-Gx
 * hypervisor device node for a USB host controller and maps its MMIO
 * register window into the kernel.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_usb_host.h>
#include <gxio/kiorpc.h>
#include <gxio/usb_host.h>

/*
 * gxio_usb_host_init() - initialize a USB host context.
 * @context:   Context to fill in; on success holds the hypervisor fd and
 *             the mapped MMIO base.
 * @usb_index: Index of the USB controller (interpolated into the
 *             hypervisor device path).
 * @is_ehci:   Nonzero selects the EHCI device node, zero selects OHCI.
 *
 * Returns 0 on success, a GXIO error code propagated from the hypervisor,
 * or -ENODEV for any other open/map failure.
 */
int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
		       int is_ehci)
{
	char file[32];
	int fd;

	/* Pick the per-controller hypervisor device node: EHCI or OHCI. */
	if (is_ehci)
		snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci",
			 usb_index);
	else
		snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci",
			 usb_index);

	fd = hv_dev_open((HV_VirtAddr) file, 0);
	if (fd < 0) {
		/* Pass GXIO-range error codes through verbatim; collapse
		 * anything else to -ENODEV. */
		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
			return fd;
		else
			return -ENODEV;
	}

	context->fd = fd;

	/* Map in the MMIO space. */
	context->mmio_base =
		(void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE);

	if (context->mmio_base == NULL) {
		/* Mapping failed: release the fd so the context stays clean. */
		hv_dev_close(context->fd);
		return -ENODEV;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_init);

/*
 * gxio_usb_host_destroy() - tear down a context set up by
 * gxio_usb_host_init(): unmap the MMIO window and close the hypervisor
 * fd.  Always returns 0.
 */
int gxio_usb_host_destroy(gxio_usb_host_context_t *context)
{
	iounmap((void __force __iomem *)(context->mmio_base));
	hv_dev_close(context->fd);

	/* Poison the context so accidental reuse is easy to spot. */
	context->mmio_base = NULL;
	context->fd = -1;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);

/* Return the kernel virtual address of the controller's MMIO window. */
void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context)
{
	return context->mmio_base;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);

/* Return the size in bytes of the controller's MMIO window. */
size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context)
{
	return HV_USB_HOST_MMIO_SIZE;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len);
gpl-2.0
FrozenCow/FIRE-ICE
drivers/staging/media/solo6x10/solo6x10-enc.c
2611
9756
/*
 * Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com>
 *
 * Original author:
 * Ben Collins <bcollins@ubuntu.com>
 *
 * Additional work by:
 * John Brooks <john.brooks@bluecherry.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/font.h>
#include <linux/bitrev.h>
#include <linux/slab.h>

#include "solo6x10.h"

/* Progressive-mode capture dimensions; the "- 16" looks like a border
 * margin — NOTE(review): confirm against the SOLO6x10 datasheet. */
#define VI_PROG_HSIZE	(1280 - 16)
#define VI_PROG_VSIZE	(1024 - 16)

/* Encoder interrupt level, fed to SOLO_VE_INTR_CTRL() at init time. */
#define IRQ_LEVEL	2

/*
 * solo_capture_config() - one-time programming of the capture engine.
 *
 * Sets up the external-memory capture window, capture bandwidth limits
 * (chip-type dependent), the five scaler dimension registers plus the
 * progressive-mode dimensions, and clears the on-screen-display state.
 * Finishes with a best-effort DMA wipe of every channel's OSD buffer;
 * if the temporary zero buffer cannot be allocated the wipe is silently
 * skipped (the function returns void, so there is no error to report).
 */
static void solo_capture_config(struct solo_dev *solo_dev)
{
	unsigned long height;
	unsigned long width;
	void *buf;
	int i;

	/* Capture DMA window: page count and base address, both in
	 * 64KiB (>> 16) units. */
	solo_reg_write(solo_dev, SOLO_CAP_BASE,
		       SOLO_CAP_MAX_PAGE((SOLO_CAP_EXT_SIZE(solo_dev)
					  - SOLO_CAP_PAGE_SIZE) >> 16)
		       | SOLO_CAP_BASE_ADDR(SOLO_CAP_EXT_ADDR(solo_dev) >> 16));

	/* XXX: Undocumented bits at b17 and b24 */
	if (solo_dev->type == SOLO_DEV_6110) {
		/* NOTE: Ref driver has (62 << 24) here as well, but it causes
		 * wacked out frame timing on 4-port 6110. */
		solo_reg_write(solo_dev, SOLO_CAP_BTW,
			       (1 << 17) | SOLO_CAP_PROG_BANDWIDTH(2) |
			       SOLO_CAP_MAX_BANDWIDTH(36));
	} else {
		solo_reg_write(solo_dev, SOLO_CAP_BTW,
			       (1 << 17) | SOLO_CAP_PROG_BANDWIDTH(2) |
			       SOLO_CAP_MAX_BANDWIDTH(32));
	}

	/* Set scale 1, 9 dimension (full size) */
	width = solo_dev->video_hsize;
	height = solo_dev->video_vsize;
	solo_reg_write(solo_dev, SOLO_DIM_SCALE1,
		       SOLO_DIM_H_MB_NUM(width / 16) |
		       SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
		       SOLO_DIM_V_MB_NUM_FIELD(height / 16));

	/* Set scale 2, 10 dimension (half width) */
	width = solo_dev->video_hsize / 2;
	height = solo_dev->video_vsize;
	solo_reg_write(solo_dev, SOLO_DIM_SCALE2,
		       SOLO_DIM_H_MB_NUM(width / 16) |
		       SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
		       SOLO_DIM_V_MB_NUM_FIELD(height / 16));

	/* Set scale 3, 11 dimension (half width and height) */
	width = solo_dev->video_hsize / 2;
	height = solo_dev->video_vsize / 2;
	solo_reg_write(solo_dev, SOLO_DIM_SCALE3,
		       SOLO_DIM_H_MB_NUM(width / 16) |
		       SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
		       SOLO_DIM_V_MB_NUM_FIELD(height / 16));

	/* Set scale 4, 12 dimension (third width and height) */
	width = solo_dev->video_hsize / 3;
	height = solo_dev->video_vsize / 3;
	solo_reg_write(solo_dev, SOLO_DIM_SCALE4,
		       SOLO_DIM_H_MB_NUM(width / 16) |
		       SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
		       SOLO_DIM_V_MB_NUM_FIELD(height / 16));

	/* Set scale 5, 13 dimension (quarter width, half height) */
	width = solo_dev->video_hsize / 4;
	height = solo_dev->video_vsize / 2;
	solo_reg_write(solo_dev, SOLO_DIM_SCALE5,
		       SOLO_DIM_H_MB_NUM(width / 16) |
		       SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
		       SOLO_DIM_V_MB_NUM_FIELD(height / 16));

	/* Progressive */
	width = VI_PROG_HSIZE;
	height = VI_PROG_VSIZE;
	solo_reg_write(solo_dev, SOLO_DIM_PROG,
		       SOLO_DIM_H_MB_NUM(width / 16) |
		       SOLO_DIM_V_MB_NUM_FRAME(height / 16) |
		       SOLO_DIM_V_MB_NUM_FIELD(height / 16));

	/* Clear OSD: disable all channels, set buffer base and clear color */
	solo_reg_write(solo_dev, SOLO_VE_OSD_CH, 0);
	solo_reg_write(solo_dev, SOLO_VE_OSD_BASE, SOLO_EOSD_EXT_ADDR >> 16);
	solo_reg_write(solo_dev, SOLO_VE_OSD_CLR,
		       0xF0 << 16 | 0x80 << 8 | 0x80);

	/* 6110 additionally doubles the OSD vertically; both chips get
	 * the H/V shadow (outline) bits. */
	if (solo_dev->type == SOLO_DEV_6010)
		solo_reg_write(solo_dev, SOLO_VE_OSD_OPT,
			       SOLO_VE_OSD_H_SHADOW | SOLO_VE_OSD_V_SHADOW);
	else
		solo_reg_write(solo_dev, SOLO_VE_OSD_OPT,
			       SOLO_VE_OSD_V_DOUBLE | SOLO_VE_OSD_H_SHADOW |
			       SOLO_VE_OSD_V_SHADOW);

	/* Clear OSG buffer: DMA a zeroed page over each channel's OSD
	 * region in external memory. */
	buf = kzalloc(SOLO_EOSD_EXT_SIZE(solo_dev), GFP_KERNEL);
	if (!buf)
		return;

	for (i = 0; i < solo_dev->nr_chans; i++) {
		solo_p2m_dma(solo_dev, 1, buf,
			     SOLO_EOSD_EXT_ADDR +
			     (SOLO_EOSD_EXT_SIZE(solo_dev) * i),
			     SOLO_EOSD_EXT_SIZE(solo_dev), 0, 0);
	}
	kfree(buf);
}

/*
 * solo_osd_print() - render the channel's OSD text into hardware.
 *
 * Rasterizes solo_enc->osd_text with the built-in VGA8x16 console font
 * (bit-reversed per byte, interleaved into the layout the encoder
 * expects — NOTE(review): the (j*2)+(i%2)+(i/2*32) addressing pairs
 * characters two-across; confirm against hardware docs), DMAs the
 * bitmap to the channel's OSD region, and toggles the channel's enable
 * bit in SOLO_VE_OSD_CH (disabled when the text is empty).
 *
 * Should be called with enable_lock held.
 * Returns 0 on success or -ENODEV if the font is unavailable.
 */
int solo_osd_print(struct solo_enc_dev *solo_enc)
{
	struct solo_dev *solo_dev = solo_enc->solo_dev;
	unsigned char *str = solo_enc->osd_text;
	u8 *buf = solo_enc->osd_buf;
	u32 reg = solo_reg_read(solo_dev, SOLO_VE_OSD_CH);
	const struct font_desc *vga = find_font("VGA8x16");
	const unsigned char *vga_data;
	int len;
	int i, j;

	if (WARN_ON_ONCE(!vga))
		return -ENODEV;

	len = strlen(str);

	if (len == 0) {
		/* Disable OSD on this channel */
		reg &= ~(1 << solo_enc->ch);
		solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
		return 0;
	}

	memset(buf, 0, SOLO_EOSD_EXT_SIZE_MAX);
	vga_data = (const unsigned char *)vga->data;

	for (i = 0; i < len; i++) {
		unsigned char c = str[i];

		/* 16 font rows per glyph, each byte mirrored for the
		 * hardware's bit order. */
		for (j = 0; j < 16; j++) {
			buf[(j * 2) + (i % 2) + (i / 2 * 32)] =
				bitrev8(vga_data[(c * 16) + j]);
		}
	}

	solo_p2m_dma(solo_dev, 1, buf,
		     SOLO_EOSD_EXT_ADDR +
		     (solo_enc->ch * SOLO_EOSD_EXT_SIZE(solo_dev)),
		     SOLO_EOSD_EXT_SIZE(solo_dev), 0, 0);

	/* Enable OSD on this channel */
	reg |= (1 << solo_enc->ch);
	solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);

	return 0;
}

/**
 * Set channel Quality Profile (0-3).
*/
/*
 * solo_s_jpeg_qp() - set the JPEG quality profile for one channel.
 * @ch: channel number, 0-31.
 * @qp: 2-bit quality profile, 0-3.
 *
 * The 32 per-channel profiles are kept as packed 2-bit fields in two
 * 32-bit shadow words (jpeg_qp[0] for channels 0-15, jpeg_qp[1] for
 * 16-31) which mirror the L/H hardware registers.  Out-of-range
 * arguments are ignored, as is the whole call on the 6010, which has
 * no per-channel QP registers (see solo_g_jpeg_qp()).
 */
void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch,
		    unsigned int qp)
{
	unsigned long flags;
	unsigned int idx, reg;

	if ((ch > 31) || (qp > 3))
		return;

	if (solo_dev->type == SOLO_DEV_6010)
		return;

	/* Select the shadow word / register pair for this channel. */
	if (ch < 16) {
		idx = 0;
		reg = SOLO_VE_JPEG_QP_CH_L;
	} else {
		ch -= 16;
		idx = 1;
		reg = SOLO_VE_JPEG_QP_CH_H;
	}
	ch *= 2;	/* bit offset of this channel's 2-bit field */

	spin_lock_irqsave(&solo_dev->jpeg_qp_lock, flags);
	solo_dev->jpeg_qp[idx] &= ~(3 << ch);
	solo_dev->jpeg_qp[idx] |= (qp & 3) << ch;
	solo_reg_write(solo_dev, reg, solo_dev->jpeg_qp[idx]);
	spin_unlock_irqrestore(&solo_dev->jpeg_qp_lock, flags);
}

/*
 * solo_g_jpeg_qp() - read back a channel's JPEG quality profile (0-3).
 *
 * Reads from the shadow words, not the hardware.  Returns the fixed
 * value 2 for the 6010 (no per-channel QP) and for invalid channels.
 */
int solo_g_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch)
{
	int idx;

	if (solo_dev->type == SOLO_DEV_6010)
		return 2;

	if (WARN_ON_ONCE(ch > 31))
		return 2;

	if (ch < 16) {
		idx = 0;
	} else {
		ch -= 16;
		idx = 1;
	}
	ch *= 2;

	return (solo_dev->jpeg_qp[idx] >> ch) & 3;
}

/* 0b10 in every 2-bit field: quality profile 2 on all 32 channels. */
#define SOLO_QP_INIT 0xaaaaaaaa

/*
 * solo_jpeg_config() - one-time programming of the JPEG encoder:
 * quantization table (chip-dependent), per-channel quality profiles,
 * the external-memory output ring, and (6110 only) an extra config
 * register.
 */
static void solo_jpeg_config(struct solo_dev *solo_dev)
{
	if (solo_dev->type == SOLO_DEV_6010) {
		solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_TBL,
			       (2 << 24) | (2 << 16) | (2 << 8) | 2);
	} else {
		solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_TBL,
			       (4 << 24) | (3 << 16) | (2 << 8) | 1);
	}
	spin_lock_init(&solo_dev->jpeg_qp_lock);

	/* Initialize Quality Profile for all channels */
	solo_dev->jpeg_qp[0] = solo_dev->jpeg_qp[1] = SOLO_QP_INIT;
	solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_CH_L, SOLO_QP_INIT);
	solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_CH_H, SOLO_QP_INIT);

	/* JPEG output ring: size in the high half, base (>> 16) low. */
	solo_reg_write(solo_dev, SOLO_VE_JPEG_CFG,
		       (SOLO_JPEG_EXT_SIZE(solo_dev) & 0xffff0000) |
		       ((SOLO_JPEG_EXT_ADDR(solo_dev) >> 16) & 0x0000ffff));
	solo_reg_write(solo_dev, SOLO_VE_JPEG_CTRL, 0xffffffff);
	if (solo_dev->type == SOLO_DEV_6110) {
		solo_reg_write(solo_dev, SOLO_VE_JPEG_CFG1,
			       (0 << 16) | (30 << 8) | 60);
	}
}

/*
 * solo_mp4e_config() - one-time programming of the MPEG-4 encoder:
 * output ring base/size, frame attributes, watermark/encryption
 * registers (all zeroed), per-channel reference-frame buffers, and
 * motion-related registers (chip-dependent).
 */
static void solo_mp4e_config(struct solo_dev *solo_dev)
{
	int i;
	u32 cfg;

	/* Encoder output ring in external memory, 64KiB units. */
	solo_reg_write(solo_dev, SOLO_VE_CFG0,
		       SOLO_VE_INTR_CTRL(IRQ_LEVEL) |
		       SOLO_VE_BLOCK_SIZE(SOLO_MP4E_EXT_SIZE(solo_dev) >> 16) |
		       SOLO_VE_BLOCK_BASE(SOLO_MP4E_EXT_ADDR(solo_dev) >> 16));

	cfg = SOLO_VE_BYTE_ALIGN(2) | SOLO_VE_INSERT_INDEX |
		SOLO_VE_MOTION_MODE(0);
	if (solo_dev->type != SOLO_DEV_6010) {
		/* Newer chips carry the top size bits separately. */
		cfg |= SOLO_VE_MPEG_SIZE_H(
			(SOLO_MP4E_EXT_SIZE(solo_dev) >> 24) & 0x0f);
		cfg |= SOLO_VE_JPEG_SIZE_H(
			(SOLO_JPEG_EXT_SIZE(solo_dev) >> 24) & 0x0f);
	}
	solo_reg_write(solo_dev, SOLO_VE_CFG1, cfg);

	/* Watermarking and encryption are unused: zero everything. */
	solo_reg_write(solo_dev, SOLO_VE_WMRK_POLY, 0);
	solo_reg_write(solo_dev, SOLO_VE_VMRK_INIT_KEY, 0);
	solo_reg_write(solo_dev, SOLO_VE_WMRK_STRL, 0);
	if (solo_dev->type == SOLO_DEV_6110)
		solo_reg_write(solo_dev, SOLO_VE_WMRK_ENABLE, 0);
	solo_reg_write(solo_dev, SOLO_VE_ENCRYP_POLY, 0);
	solo_reg_write(solo_dev, SOLO_VE_ENCRYP_INIT, 0);

	solo_reg_write(solo_dev, SOLO_VE_ATTR,
		       SOLO_VE_LITTLE_ENDIAN |
		       SOLO_COMP_ATTR_FCODE(1) |
		       SOLO_COMP_TIME_INC(0) |
		       SOLO_COMP_TIME_WIDTH(15) |
		       SOLO_DCT_INTERVAL(solo_dev->type == SOLO_DEV_6010 ?
					 9 : 10));

	/* Two reference-frame buffers per channel (main and "E"),
	 * offset by 16 slots. */
	for (i = 0; i < solo_dev->nr_chans; i++) {
		solo_reg_write(solo_dev, SOLO_VE_CH_REF_BASE(i),
			       (SOLO_EREF_EXT_ADDR(solo_dev) +
				(i * SOLO_EREF_EXT_SIZE)) >> 16);
		solo_reg_write(solo_dev, SOLO_VE_CH_REF_BASE_E(i),
			       (SOLO_EREF_EXT_ADDR(solo_dev) +
				((i + 16) * SOLO_EREF_EXT_SIZE)) >> 16);
	}

	if (solo_dev->type == SOLO_DEV_6110) {
		solo_reg_write(solo_dev, SOLO_VE_COMPT_MOT, 0x00040008);
	} else {
		for (i = 0; i < solo_dev->nr_chans; i++)
			solo_reg_write(solo_dev, SOLO_VE_CH_MOT(i), 0x100);
	}
}

/*
 * solo_enc_init() - bring up the capture/MPEG-4/JPEG encoder blocks and
 * leave every channel disabled (scale and compression-enable cleared).
 * Always returns 0.
 */
int solo_enc_init(struct solo_dev *solo_dev)
{
	int i;

	solo_capture_config(solo_dev);
	solo_mp4e_config(solo_dev);
	solo_jpeg_config(solo_dev);

	for (i = 0; i < solo_dev->nr_chans; i++) {
		solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(i), 0);
		solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(i), 0);
	}

	return 0;
}

/*
 * solo_enc_exit() - disable all encoder channels on teardown (same
 * register clears as the tail of solo_enc_init()).
 */
void solo_enc_exit(struct solo_dev *solo_dev)
{
	int i;

	for (i = 0; i < solo_dev->nr_chans; i++) {
		solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(i), 0);
		solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(i), 0);
	}
}
gpl-2.0
sac23/Sacs_Stock_Kernel
drivers/net/wireless/b43/phy_n.c
3379
148197
/* Broadcom B43 wireless driver IEEE 802.11n PHY support Copyright (c) 2008 Michael Buesch <m@bues.ch> Copyright (c) 2010-2011 Rafał Miłecki <zajec5@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/types.h> #include "b43.h" #include "phy_n.h" #include "tables_nphy.h" #include "radio_2055.h" #include "radio_2056.h" #include "main.h" struct nphy_txgains { u16 txgm[2]; u16 pga[2]; u16 pad[2]; u16 ipa[2]; }; struct nphy_iqcal_params { u16 txgm; u16 pga; u16 pad; u16 ipa; u16 cal_gain; u16 ncorr[5]; }; struct nphy_iq_est { s32 iq0_prod; u32 i0_pwr; u32 q0_pwr; s32 iq1_prod; u32 i1_pwr; u32 q1_pwr; }; enum b43_nphy_rf_sequence { B43_RFSEQ_RX2TX, B43_RFSEQ_TX2RX, B43_RFSEQ_RESET2RX, B43_RFSEQ_UPDATE_GAINH, B43_RFSEQ_UPDATE_GAINL, B43_RFSEQ_UPDATE_GAINU, }; enum b43_nphy_rssi_type { B43_NPHY_RSSI_X = 0, B43_NPHY_RSSI_Y, B43_NPHY_RSSI_Z, B43_NPHY_RSSI_PWRDET, B43_NPHY_RSSI_TSSI_I, B43_NPHY_RSSI_TSSI_Q, B43_NPHY_RSSI_TBD, }; static inline bool b43_nphy_ipa(struct b43_wldev *dev) { enum ieee80211_band band = b43_current_band(dev->wl); return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ static u8 
b43_nphy_get_rx_core_state(struct b43_wldev *dev) { return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & B43_NPHY_RFSEQCA_RXEN) >> B43_NPHY_RFSEQCA_RXEN_SHIFT; } /************************************************** * RF (just without b43_nphy_rf_control_intc_override) **************************************************/ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, enum b43_nphy_rf_sequence seq) { static const u16 trigger[] = { [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, }; int i; u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); for (i = 0; i < 200; i++) { if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) goto ok; msleep(1); } b43err(dev->wl, "RF sequence status timeout\n"); ok: b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, u16 value, u8 core, bool off) { int i; u8 index = fls(field); u8 addr, en_addr, val_addr; /* we expect only one bit set */ B43_WARN_ON(field & (~(1 << (index - 1)))); if (dev->phy.rev >= 3) { const struct nphy_rf_control_override_rev3 *rf_ctrl; for (i = 0; i < 2; i++) { if (index == 0 || index == 16) { b43err(dev->wl, "Unsupported RF Ctrl Override call\n"); return; } rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; en_addr = B43_PHY_N((i == 0) ? rf_ctrl->en_addr0 : rf_ctrl->en_addr1); val_addr = B43_PHY_N((i == 0) ? 
rf_ctrl->val_addr0 : rf_ctrl->val_addr1); if (off) { b43_phy_mask(dev, en_addr, ~(field)); b43_phy_mask(dev, val_addr, ~(rf_ctrl->val_mask)); } else { if (core == 0 || ((1 << i) & core)) { b43_phy_set(dev, en_addr, field); b43_phy_maskset(dev, val_addr, ~(rf_ctrl->val_mask), (value << rf_ctrl->val_shift)); } } } } else { const struct nphy_rf_control_override_rev2 *rf_ctrl; if (off) { b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); value = 0; } else { b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); } for (i = 0; i < 2; i++) { if (index <= 1 || index == 16) { b43err(dev->wl, "Unsupported RF Ctrl Override call\n"); return; } if (index == 2 || index == 10 || (index >= 13 && index <= 15)) { core = 1; } rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; addr = B43_PHY_N((i == 0) ? rf_ctrl->addr0 : rf_ctrl->addr1); if ((1 << i) & core) b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), (value << rf_ctrl->shift)); b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); udelay(1); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, u16 value, u8 core) { u8 i, j; u16 reg, tmp, val; B43_WARN_ON(dev->phy.rev < 3); B43_WARN_ON(field > 4); for (i = 0; i < 2; i++) { if ((core == 1 && i == 1) || (core == 2 && !i)) continue; reg = (i == 0) ? 
B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; b43_phy_set(dev, reg, 0x400); switch (field) { case 0: b43_phy_write(dev, reg, 0); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); break; case 1: if (!i) { b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, 0xFC3F, (value << 6)); b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, 0xFFFE, 1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); for (j = 0; j < 100; j++) { if (!(b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START)) { j = 0; break; } udelay(10); } if (j) b43err(dev->wl, "intc override timeout\n"); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, 0xFFFE); } else { b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, 0xFC3F, (value << 6)); b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX); for (j = 0; j < 100; j++) { if (!(b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX)) { j = 0; break; } udelay(10); } if (j) b43err(dev->wl, "intc override timeout\n"); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); } break; case 2: if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { tmp = 0x0020; val = value << 5; } else { tmp = 0x0010; val = value << 4; } b43_phy_maskset(dev, reg, ~tmp, val); break; case 3: if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { tmp = 0x0001; val = value; } else { tmp = 0x0004; val = value << 2; } b43_phy_maskset(dev, reg, ~tmp, val); break; case 4: if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { tmp = 0x0002; val = value << 1; } else { tmp = 0x0008; val = value << 3; } b43_phy_maskset(dev, reg, ~tmp, val); break; } } } /************************************************** * Various PHY ops **************************************************/ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ static void b43_nphy_write_clip_detection(struct b43_wldev *dev, const u16 *clip_st) { b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); 
} /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) { clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) { u16 tmp; if (dev->dev->core_rev == 16) b43_mac_suspend(dev); tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | B43_NPHY_CLASSCTL_WAITEDEN); tmp &= ~mask; tmp |= (val & mask); b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); if (dev->dev->core_rev == 16) b43_mac_enable(dev); return tmp; } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ static void b43_nphy_reset_cca(struct b43_wldev *dev) { u16 bbcfg; b43_phy_force_clock(dev, 1); bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); udelay(1); b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); b43_phy_force_clock(dev, 0); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; if (enable) { static const u16 clip[] = { 0xFFFF, 0xFFFF }; if (nphy->deaf_count++ == 0) { nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); b43_nphy_classifier(dev, 0x7, 0); b43_nphy_read_clip_detection(dev, nphy->clip_state); b43_nphy_write_clip_detection(dev, clip); } b43_nphy_reset_cca(dev); } else { if (--nphy->deaf_count == 0) { b43_nphy_classifier(dev, 0x7, nphy->classifier_state); b43_nphy_write_clip_detection(dev, nphy->clip_state); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) { struct b43_phy_n *nphy = 
dev->phy.n; u8 i; s16 tmp; u16 data[4]; s16 gain[2]; u16 minmax[2]; static const u16 lna_gain[4] = { -2, 10, 19, 25 }; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); if (nphy->gain_boost) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { gain[0] = 6; gain[1] = 6; } else { tmp = 40370 - 315 * dev->phy.channel; gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); tmp = 23242 - 224 * dev->phy.channel; gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); } } else { gain[0] = 0; gain[1] = 0; } for (i = 0; i < 2; i++) { if (nphy->elna_gain_config) { data[0] = 19 + gain[i]; data[1] = 25 + gain[i]; data[2] = 25 + gain[i]; data[3] = 25 + gain[i]; } else { data[0] = lna_gain[0] + gain[i]; data[1] = lna_gain[1] + gain[i]; data[2] = lna_gain[2] + gain[i]; data[3] = lna_gain[3] + gain[i]; } b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data); minmax[i] = 23 + gain[i]; } b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN, minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT); b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN, minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 0); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, u8 *events, u8 *delays, u8 length) { struct b43_phy_n *nphy = dev->phy.n; u8 i; u8 end = (dev->phy.rev >= 3) ? 
0x1F : 0x0F; u16 offset1 = cmd << 4; u16 offset2 = offset1 + 0x80; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, true); b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); for (i = length; i < 16; i++) { b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); } if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, false); } /************************************************** * Radio 0x2056 **************************************************/ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1); b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2); b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv); b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2); b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, e->radio_syn_pll_loopfilter1); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, e->radio_syn_pll_loopfilter2); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER3, e->radio_syn_pll_loopfilter3); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, e->radio_syn_pll_loopfilter4); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER5, e->radio_syn_pll_loopfilter5); b43_radio_write(dev, B2056_SYN_RESERVED_ADDR27, e->radio_syn_reserved_addr27); b43_radio_write(dev, B2056_SYN_RESERVED_ADDR28, e->radio_syn_reserved_addr28); b43_radio_write(dev, B2056_SYN_RESERVED_ADDR29, e->radio_syn_reserved_addr29); b43_radio_write(dev, B2056_SYN_LOGEN_VCOBUF1, e->radio_syn_logen_vcobuf1); b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2); b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3); b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4); b43_radio_write(dev, B2056_RX0 | 
B2056_RX_LNAA_TUNE, e->radio_rx0_lnaa_tune); b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG_TUNE, e->radio_rx0_lnag_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE, e->radio_tx0_intpaa_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE, e->radio_tx0_intpag_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE, e->radio_tx0_pada_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE, e->radio_tx0_padg_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE, e->radio_tx0_pgaa_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE, e->radio_tx0_pgag_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE, e->radio_tx0_mixa_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE, e->radio_tx0_mixg_boost_tune); b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA_TUNE, e->radio_rx1_lnaa_tune); b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG_TUNE, e->radio_rx1_lnag_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE, e->radio_tx1_intpaa_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE, e->radio_tx1_intpag_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE, e->radio_tx1_pada_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE, e->radio_tx1_padg_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE, e->radio_tx1_pgaa_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE, e->radio_tx1_pgag_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE, e->radio_tx1_mixa_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXG_BOOST_TUNE, e->radio_tx1_mixg_boost_tune); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ static void b43_radio_2056_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { struct ssb_sprom *sprom = dev->dev->bus_sprom; enum ieee80211_band band = 
b43_current_band(dev->wl); u16 offset; u8 i; u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; B43_WARN_ON(dev->phy.rev < 3); b43_chantab_radio_2056_upload(dev, e); b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); if (dev->dev->chip_id == 0x4716) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0); } else { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14); } } if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C); } if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) { for (i = 0; i < 2; i++) { offset = i ? B2056_TX1 : B2056_TX0; if (dev->phy.rev >= 5) { b43_radio_write(dev, offset | B2056_TX_PADG_IDAC, 0xcc); if (dev->dev->chip_id == 0x4716) { bias = 0x40; cbias = 0x45; pag_boost = 0x5; pgag_boost = 0x33; mixg_boost = 0x55; } else { bias = 0x25; cbias = 0x20; pag_boost = 0x4; pgag_boost = 0x03; mixg_boost = 0x65; } padg_boost = 0x77; b43_radio_write(dev, offset | B2056_TX_INTPAG_IMAIN_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_IAUX_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_CASCBIAS, cbias); b43_radio_write(dev, offset | B2056_TX_INTPAG_BOOST_TUNE, pag_boost); b43_radio_write(dev, offset | B2056_TX_PGAG_BOOST_TUNE, pgag_boost); b43_radio_write(dev, offset | B2056_TX_PADG_BOOST_TUNE, padg_boost); b43_radio_write(dev, offset | B2056_TX_MIXG_BOOST_TUNE, mixg_boost); } else { bias = dev->phy.is_40mhz ? 
0x40 : 0x20; b43_radio_write(dev, offset | B2056_TX_INTPAG_IMAIN_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_IAUX_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_CASCBIAS, 0x30); } b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); } } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { /* TODO */ } udelay(50); /* VCO calibration */ b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x18); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x39); udelay(300); } static void b43_radio_init2056_pre(struct b43_wldev *dev) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU); /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU); } static void b43_radio_init2056_post(struct b43_wldev *dev) { b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB); b43_radio_set(dev, B2056_SYN_COM_PU, 0x2); b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2); msleep(1); b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); /* if (nphy->init_por) Call Radio 2056 Recalibrate */ } /* * Initialize a Broadcom 2056 N-radio * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init */ static void b43_radio_init2056(struct b43_wldev *dev) { b43_radio_init2056_pre(dev); b2056_upload_inittabs(dev, 0, 0); b43_radio_init2056_post(dev); } /************************************************** * Radio 0x2055 **************************************************/ static void b43_chantab_radio_upload(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev2 *e) { b43_radio_write(dev, B2055_PLL_REF, 
e->radio_pll_ref); b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1); b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2); b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ static void b43_radio_2055_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev2 *e) { B43_WARN_ON(dev->phy.rev >= 3); b43_chantab_radio_upload(dev, e); udelay(50); b43_radio_write(dev, B2055_VCO_CAL10, 0x05); b43_radio_write(dev, B2055_VCO_CAL10, 0x45); b43_read32(dev, B43_MMIO_MACCTL); /* flush 
writes */ b43_radio_write(dev, B2055_VCO_CAL10, 0x65); udelay(300); } static void b43_radio_init2055_pre(struct b43_wldev *dev) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_PORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU | B43_NPHY_RFCTL_CMD_OEPORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_PORFORCE); } static void b43_radio_init2055_post(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; struct ssb_sprom *sprom = dev->dev->bus_sprom; int i; u16 val; bool workaround = false; if (sprom->revision < 4) workaround = (dev->dev->board_vendor != PCI_VENDOR_ID_BROADCOM && dev->dev->board_type == 0x46D && dev->dev->board_rev >= 0x41); else workaround = !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS); b43_radio_mask(dev, B2055_MASTER1, 0xFFF3); if (workaround) { b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F); b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F); } b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C); b43_radio_write(dev, B2055_CAL_MISC, 0x3C); b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE); b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80); b43_radio_set(dev, B2055_CAL_MISC, 0x1); msleep(1); b43_radio_set(dev, B2055_CAL_MISC, 0x40); for (i = 0; i < 200; i++) { val = b43_radio_read(dev, B2055_CAL_COUT2); if (val & 0x80) { i = 0; break; } udelay(10); } if (i) b43err(dev->wl, "radio post init timeout\n"); b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); b43_switch_channel(dev, dev->phy.channel); b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9); b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9); b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83); b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83); b43_radio_maskset(dev, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6); b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6); if (!nphy->gain_boost) { b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2); b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2); } else { b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD); b43_radio_mask(dev, 
B2055_C2_RX_RFSPC1, 0xFFFD); } udelay(2); } /* * Initialize a Broadcom 2055 N-radio * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init */ static void b43_radio_init2055(struct b43_wldev *dev) { b43_radio_init2055_pre(dev); if (b43_status(dev) < B43_STAT_INITIALIZED) { /* Follow wl, not specs. Do not force uploading all regs */ b2055_upload_inittab(dev, 0, 0); } else { bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ; b2055_upload_inittab(dev, ghz5, 0); } b43_radio_init2055_post(dev); } /************************************************** * Samples **************************************************/ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ static int b43_nphy_load_samples(struct b43_wldev *dev, struct b43_c32 *samples, u16 len) { struct b43_phy_n *nphy = dev->phy.n; u16 i; u32 *data; data = kzalloc(len * sizeof(u32), GFP_KERNEL); if (!data) { b43err(dev->wl, "allocation for samples loading failed\n"); return -ENOMEM; } if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); for (i = 0; i < len; i++) { data[i] = (samples[i].i & 0x3FF << 10); data[i] |= samples[i].q & 0x3FF; } b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data); kfree(data); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 0); return 0; } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, bool test) { int i; u16 bw, len, rot, angle; struct b43_c32 *samples; bw = (dev->phy.is_40mhz) ? 
40 : 20; len = bw << 3; if (test) { if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX) bw = 82; else bw = 80; if (dev->phy.is_40mhz) bw <<= 1; len = bw << 1; } samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL); if (!samples) { b43err(dev->wl, "allocation for samples generation failed\n"); return 0; } rot = (((freq * 36) / bw) << 16) / 100; angle = 0; for (i = 0; i < len; i++) { samples[i] = b43_cordic(angle); angle += rot; samples[i].q = CORDIC_CONVERT(samples[i].q * max); samples[i].i = CORDIC_CONVERT(samples[i].i * max); } i = b43_nphy_load_samples(dev, samples, len); kfree(samples); return (i < 0) ? 0 : len; } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, u16 wait, bool iqmode, bool dac_test) { struct b43_phy_n *nphy = dev->phy.n; int i; u16 seq_mode; u32 tmp; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, true); if ((nphy->bb_mult_save & 0x80000000) == 0) { tmp = b43_ntab_read(dev, B43_NTAB16(15, 87)); nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000; } if (!dev->phy.is_40mhz) tmp = 0x6464; else tmp = 0x4747; b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, false); b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1)); if (loops != 0xFFFF) b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1)); else b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops); b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait); seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER); if (iqmode) { b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000); } else { if (dac_test) b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5); else b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1); } for (i = 0; i < 100; i++) { if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) { i = 0; break; } udelay(10); } if (i) b43err(dev->wl, "run samples 
timeout\n"); b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } /************************************************** * RSSI **************************************************/ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, s8 offset, u8 core, u8 rail, enum b43_nphy_rssi_type type) { u16 tmp; bool core1or5 = (core == 1) || (core == 5); bool core2or5 = (core == 2) || (core == 5); offset = clamp_val(offset, -32, 31); tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); if (core1or5 
&& (rail == 1) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); } static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { u8 i; u16 reg, val; if (code == 0) { b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF); b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF); b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF); b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF); b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3); b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3); } else { for (i = 0; i < 2; i++) { if ((code == 1 && i == 1) || (code == 2 && !i)) continue; reg = (i == 0) ? B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; b43_phy_maskset(dev, reg, 0xFDFF, 0x0200); if (type < 3) { reg = (i == 0) ? 
B43_NPHY_AFECTL_C1 : B43_NPHY_AFECTL_C2; b43_phy_maskset(dev, reg, 0xFCFF, 0); reg = (i == 0) ? B43_NPHY_RFCTL_LUT_TRSW_UP1 : B43_NPHY_RFCTL_LUT_TRSW_UP2; b43_phy_maskset(dev, reg, 0xFFC3, 0); if (type == 0) val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; else if (type == 1) val = 16; else val = 32; b43_phy_set(dev, reg, val); reg = (i == 0) ? B43_NPHY_TXF_40CO_B1S0 : B43_NPHY_TXF_40CO_B32S1; b43_phy_set(dev, reg, 0x0020); } else { if (type == 6) val = 0x0100; else if (type == 3) val = 0x0200; else val = 0x0300; reg = (i == 0) ? B43_NPHY_AFECTL_C1 : B43_NPHY_AFECTL_C2; b43_phy_maskset(dev, reg, 0xFCFF, val); b43_phy_maskset(dev, reg, 0xF3FF, val << 2); if (type != 3 && type != 6) { enum ieee80211_band band = b43_current_band(dev->wl); if (b43_nphy_ipa(dev)) val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; else val = 0x11; reg = (i == 0) ? 0x2000 : 0x3000; reg |= B2055_PADDRV; b43_radio_write16(dev, reg, val); reg = (i == 0) ? B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; b43_phy_set(dev, reg, 0x0200); } } } } } static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { u16 val; if (type < 3) val = 0; else if (type == 6) val = 1; else if (type == 3) val = 2; else val = 3; val = (val << 12) | (val << 14); b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); if (type < 3) { b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, (type + 1) << 4); b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, (type + 1) << 4); } if (code == 0) { b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); if (type < 3) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~(B43_NPHY_RFCTL_CMD_RXEN | B43_NPHY_RFCTL_CMD_CORESEL)); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(0x1 << 12 | 0x1 << 5 | 0x1 << 1 | 0x1)); b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_START); udelay(20); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); } } else { b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); if (type < 3) { 
b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, ~(B43_NPHY_RFCTL_CMD_RXEN | B43_NPHY_RFCTL_CMD_CORESEL), (B43_NPHY_RFCTL_CMD_RXEN | code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT)); b43_phy_set(dev, B43_NPHY_RFCTL_OVER, (0x1 << 12 | 0x1 << 5 | 0x1 << 1 | 0x1)); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); udelay(20); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { if (dev->phy.rev >= 3) b43_nphy_rev3_rssi_select(dev, code, type); else b43_nphy_rev2_rssi_select(dev, code, type); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf) { int i; for (i = 0; i < 2; i++) { if (type == 2) { if (i == 0) { b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM, 0xFC, buf[0]); b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, 0xFC, buf[1]); } else { b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM, 0xFC, buf[2 * i]); b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, 0xFC, buf[2 * i + 1]); } } else { if (i == 0) b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, 0xF3, buf[0] << 2); else b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, 0xF3, buf[2 * i + 1] << 2); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, u8 nsamp) { int i; int out; u16 save_regs_phy[9]; u16 s[2]; if (dev->phy.rev >= 3) { save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1); save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2); save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); 
save_regs_phy[8] = 0; } else { save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD); save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); save_regs_phy[7] = 0; save_regs_phy[8] = 0; } b43_nphy_rssi_select(dev, 5, type); if (dev->phy.rev < 2) { save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL); b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5); } for (i = 0; i < 4; i++) buf[i] = 0; for (i = 0; i < nsamp; i++) { if (dev->phy.rev < 2) { s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT); s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT); } else { s[0] = b43_phy_read(dev, B43_NPHY_RSSI1); s[1] = b43_phy_read(dev, B43_NPHY_RSSI2); } buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2; buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2; buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2; buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2; } out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 | (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF); if (dev->phy.rev < 2) b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]); if (dev->phy.rev >= 3) { b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, save_regs_phy[2]); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, save_regs_phy[3]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); } else { b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); 
b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]); b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]); b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]); b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]); } return out; } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; u16 saved_regs_phy_rfctl[2]; u16 saved_regs_phy[13]; u16 regs_to_store[] = { B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER, B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2, B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER, B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2, B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2 }; u16 class; u16 clip_state[2]; u16 clip_off[2] = { 0xFFFF, 0xFFFF }; u8 vcm_final = 0; s8 offset[4]; s32 results[8][4] = { }; s32 results_min[4] = { }; s32 poll_results[4] = { }; u16 *rssical_radio_regs = NULL; u16 *rssical_phy_regs = NULL; u16 r; /* routing */ u8 rx_core_state; u8 core, i, j; class = b43_nphy_classifier(dev, 0, 0); b43_nphy_classifier(dev, 7, 4); b43_nphy_read_clip_detection(dev, clip_state); b43_nphy_write_clip_detection(dev, clip_off); saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]); b43_nphy_rf_control_intc_override(dev, 0, 0, 7); b43_nphy_rf_control_intc_override(dev, 1, 1, 7); b43_nphy_rf_control_override(dev, 0x1, 0, 0, false); b43_nphy_rf_control_override(dev, 0x2, 1, 0, false); b43_nphy_rf_control_override(dev, 0x80, 1, 0, false); b43_nphy_rf_control_override(dev, 0x40, 1, 0, false); if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { b43_nphy_rf_control_override(dev, 0x20, 0, 0, false); b43_nphy_rf_control_override(dev, 0x10, 1, 0, false); } else { 
b43_nphy_rf_control_override(dev, 0x10, 0, 0, false); b43_nphy_rf_control_override(dev, 0x20, 1, 0, false); } rx_core_state = b43_nphy_get_rx_core_state(dev); for (core = 0; core < 2; core++) { if (!(rx_core_state & (1 << core))) continue; r = core ? B2056_RX1 : B2056_RX0; b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, 2); b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, 2); for (i = 0; i < 8; i++) { b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, i << 2); b43_nphy_poll_rssi(dev, 2, results[i], 8); } for (i = 0; i < 4; i++) { s32 curr; s32 mind = 40; s32 minpoll = 249; u8 minvcm = 0; if (2 * core != i) continue; for (j = 0; j < 8; j++) { curr = results[j][i] * results[j][i] + results[j][i + 1] * results[j][i]; if (curr < mind) { mind = curr; minvcm = j; } if (results[j][i] < minpoll) minpoll = results[j][i]; } vcm_final = minvcm; results_min[i] = minpoll; } b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, vcm_final << 2); for (i = 0; i < 4; i++) { if (core != i / 2) continue; offset[i] = -results[vcm_final][i]; if (offset[i] < 0) offset[i] = -((abs(offset[i]) + 4) / 8); else offset[i] = (offset[i] + 4) / 8; if (results_min[i] == 248) offset[i] = -32; b43_nphy_scale_offset_rssi(dev, 0, offset[i], (i / 2 == 0) ? 1 : 2, (i % 2 == 0) ? 
0 : 1, 2); } } for (core = 0; core < 2; core++) { if (!(rx_core_state & (1 << core))) continue; for (i = 0; i < 2; i++) { b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, i); b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i); b43_nphy_poll_rssi(dev, i, poll_results, 8); for (j = 0; j < 4; j++) { if (j / 2 == core) offset[j] = 232 - poll_results[j]; if (offset[j] < 0) offset[j] = -(abs(offset[j] + 4) / 8); else offset[j] = (offset[j] + 4) / 8; b43_nphy_scale_offset_rssi(dev, 0, offset[2 * core], core + 1, j % 2, i); } } } b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, saved_regs_phy_rfctl[0]); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, saved_regs_phy_rfctl[1]); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); b43_phy_set(dev, B43_NPHY_TXF_40CO_B1S1, 0x1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1); b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1); for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]); /* Store for future configuration */ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; } else { rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; } rssical_radio_regs[0] = b43_radio_read(dev, 0x602B); rssical_radio_regs[0] = b43_radio_read(dev, 0x702B); rssical_phy_regs[0] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Z); rssical_phy_regs[1] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z); rssical_phy_regs[2] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Z); rssical_phy_regs[3] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z); rssical_phy_regs[4] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_X); rssical_phy_regs[5] = b43_phy_read(dev, 
B43_NPHY_RSSIMC_0Q_RSSI_X); rssical_phy_regs[6] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_X); rssical_phy_regs[7] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_X); rssical_phy_regs[8] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Y); rssical_phy_regs[9] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y); rssical_phy_regs[10] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Y); rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y); /* Remember for which channel we store configuration */ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) nphy->rssical_chanspec_2G.center_freq = dev->phy.channel_freq; else nphy->rssical_chanspec_5G.center_freq = dev->phy.channel_freq; /* End of calibration, restore configuration */ b43_nphy_classifier(dev, 7, class); b43_nphy_write_clip_detection(dev, clip_state); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) { int i, j; u8 state[4]; u8 code, val; u16 class, override; u8 regs_save_radio[2]; u16 regs_save_phy[2]; s8 offset[4]; u8 core; u8 rail; u16 clip_state[2]; u16 clip_off[2] = { 0xFFFF, 0xFFFF }; s32 results_min[4] = { }; u8 vcm_final[4] = { }; s32 results[4][4] = { }; s32 miniq[4][2] = { }; if (type == 2) { code = 0; val = 6; } else if (type < 2) { code = 25; val = 4; } else { B43_WARN_ON(1); return; } class = b43_nphy_classifier(dev, 0, 0); b43_nphy_classifier(dev, 7, 4); b43_nphy_read_clip_detection(dev, clip_state); b43_nphy_write_clip_detection(dev, clip_off); if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) override = 0x140; else override = 0x110; regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX); b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override); b43_radio_write16(dev, B2055_C1_PD_RXTX, val); regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override); 
b43_radio_write16(dev, B2055_C2_PD_RXTX, val); state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07; state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07; b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8); b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8); state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07; state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07; b43_nphy_rssi_select(dev, 5, type); b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type); b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type); for (i = 0; i < 4; i++) { u8 tmp[4]; for (j = 0; j < 4; j++) tmp[j] = i; if (type != 1) b43_nphy_set_rssi_2055_vcm(dev, type, tmp); b43_nphy_poll_rssi(dev, type, results[i], 8); if (type < 2) for (j = 0; j < 2; j++) miniq[i][j] = min(results[i][2 * j], results[i][2 * j + 1]); } for (i = 0; i < 4; i++) { s32 mind = 40; u8 minvcm = 0; s32 minpoll = 249; s32 curr; for (j = 0; j < 4; j++) { if (type == 2) curr = abs(results[j][i]); else curr = abs(miniq[j][i / 2] - code * 8); if (curr < mind) { mind = curr; minvcm = j; } if (results[j][i] < minpoll) minpoll = results[j][i]; } results_min[i] = minpoll; vcm_final[i] = minvcm; } if (type != 1) b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final); for (i = 0; i < 4; i++) { offset[i] = (code * 8) - results[vcm_final[i]][i]; if (offset[i] < 0) offset[i] = -((abs(offset[i]) + 4) / 8); else offset[i] = (offset[i] + 4) / 8; if (results_min[i] == 248) offset[i] = code - 32; core = (i / 2) ? 2 : 1; rail = (i % 2) ? 
1 : 0; b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail, type); } b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]); switch (state[2]) { case 1: b43_nphy_rssi_select(dev, 1, 2); break; case 4: b43_nphy_rssi_select(dev, 1, 0); break; case 2: b43_nphy_rssi_select(dev, 1, 1); break; default: b43_nphy_rssi_select(dev, 1, 1); break; } switch (state[3]) { case 1: b43_nphy_rssi_select(dev, 2, 2); break; case 4: b43_nphy_rssi_select(dev, 2, 0); break; default: b43_nphy_rssi_select(dev, 2, 1); break; } b43_nphy_rssi_select(dev, 0, type); b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]); b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]); b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]); b43_nphy_classifier(dev, 7, class); b43_nphy_write_clip_detection(dev, clip_state); /* Specs don't say about reset here, but it makes wl and b43 dumps identical, it really seems wl performs this */ b43_nphy_reset_cca(dev); } /* * RSSI Calibration * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ static void b43_nphy_rssi_cal(struct b43_wldev *dev) { if (dev->phy.rev >= 3) { b43_nphy_rev3_rssi_cal(dev); } else { b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z); b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X); b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y); } } /************************************************** * Workarounds **************************************************/ static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; bool ghz5; bool ext_lna; u16 rssi_gain; struct nphy_gain_ctl_workaround_entry *e; u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; /* Prepare values */ ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; ext_lna = ghz5 ? 
sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ :
		  sprom->boardflags_lo & B43_BFL_EXTLNA;
	e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
	if (ghz5 && dev->phy.rev >= 5)
		rssi_gain = 0x90;
	else
		rssi_gain = 0x50;

	b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);

	/* Set Clip 2 detect */
	b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
	b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);

	/* Identical bias/gain settings for both RX cores (RX0/RX1). */
	b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, 0x17);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, 0x17);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, rssi_gain);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, rssi_gain);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, 0x17);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, 0x17);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);

	/* Upload the per-board gain tables for both cores. */
	b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
	b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
	b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
	b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
	b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
	b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
	b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
	b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
	b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
	b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
	b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
	b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);

	b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
	b43_phy_write(dev, 0x2A7, e->init_gain);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, e->rfseq_init);

	/* TODO: check defines. Do not match variables names */
	b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
	b43_phy_write(dev, 0x2A9, e->cliphi_gain);
	b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
	b43_phy_write(dev, 0x2AB, e->clipmd_gain);
	b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
	b43_phy_write(dev, 0x2AD, e->cliplo_gain);

	b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
	b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
	b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
	b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
	b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
	b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
			~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
	b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
			~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
	b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
}

/* Gain-control workarounds for rev1/rev2 PHYs. */
static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u8 i, j;
	u8 code;
	u16 tmp;
	u8 rfseq_events[3] = { 6, 8, 7 };
	u8 rfseq_delays[3] = { 10, 30, 1 };

	/* Set Clip 2 detect */
	b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
	b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);

	/* Set narrowband clip threshold */
	b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
	b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);

	if (!dev->phy.is_40mhz) {
		/* Set dwell lengths */
		b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
		b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
		b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
		b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
	}

	/* Set wideband clip 2 threshold */
	b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
			~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21);
	b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
			~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21);

	if (!dev->phy.is_40mhz) {
		b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
		b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
				~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
		b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
				~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
		b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
				~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
	}

	b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);

	/* Pick the initial gain code; gain-boosted boards use a lower
	 * index on 40 MHz channels in the 2.4 GHz band. */
	if (nphy->gain_boost) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
		    dev->phy.is_40mhz)
			code = 4;
		else
			code = 5;
	} else {
		code = dev->phy.is_40mhz ? 6 : 7;
	}

	/* Set HPVGA2 index */
	b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
			~B43_NPHY_C1_INITGAIN_HPVGA2,
			code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
	b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
			~B43_NPHY_C2_INITGAIN_HPVGA2,
			code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);

	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
	/* specs say about 2 loops, but wl does 4 */
	for (i = 0; i < 4; i++)
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C));

	b43_nphy_adjust_lna_gain_table(dev);

	if (nphy->elna_gain_config) {
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);

		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);

		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
		/* specs say about 2 loops, but wl does 4 */
		for (i = 0; i < 4; i++)
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
				      (code << 8 | 0x74));
	}

	if (dev->phy.rev == 2) {
		for (i = 0; i < 4; i++) {
			b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				      (0x0400 * i) + 0x0020);
			for (j = 0; j < 21; j++) {
				tmp = j * (i < 2 ? 3 : 1);
				b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					      tmp);
			}
		}
	}

	b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3);
	b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
		~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
		0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
{
	/* Dispatch to the revision-specific gain-control workarounds. */
	if (dev->phy.rev >= 3)
		b43_nphy_gain_ctl_workarounds_rev3plus(dev);
	else
		b43_nphy_gain_ctl_workarounds_rev1_2(dev);
}

/* PHY workarounds for rev3+ hardware: RF sequences, gain control and
 * assorted register/table fixups. */
static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	/* TX to RX */
	u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
	u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
	/* RX to TX */
	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
				   0x1F };
	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
	u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
	u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };

	u16 tmp16;
	u32 tmp32;

	b43_phy_write(dev, 0x23f, 0x1f8);
	b43_phy_write(dev, 0x240, 0x1f8);

	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
	tmp32 &= 0xffffff;
	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);

	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);

	b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
	b43_phy_write(dev, 0x2AE, 0x000C);

	/* TX to RX */
	b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
				 ARRAY_SIZE(tx2rx_events));

	/* RX to TX */
	if (b43_nphy_ipa(dev))
		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
				rx2tx_delays_ipa,
				ARRAY_SIZE(rx2tx_events_ipa));
if (nphy->hw_phyrxchain != 3 &&
	    nphy->hw_phyrxchain != nphy->hw_phytxchain) {
		if (b43_nphy_ipa(dev)) {
			rx2tx_delays[5] = 59;
			rx2tx_delays[6] = 1;
			rx2tx_events[7] = 0x1F;
		}
		b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
					 ARRAY_SIZE(rx2tx_events));
	}

	tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
		0x2 : 0x9C40;
	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);

	b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);

	b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
	b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);

	b43_nphy_gain_ctl_workarounds(dev);

	b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
	b43_ntab_write(dev, B43_NTAB16(8, 16), 2);

	/* TODO */

	/* Mixer bias setup, identical for both RX cores. */
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);

	/* N PHY WAR TX Chain Update with hw_phytxchain as argument */

	if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
	     b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
	    (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
	     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
		tmp32 = 0x00088888;
	else
		tmp32 = 0x88888888;
	b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
	b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
	b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);

	if (dev->phy.rev == 4 &&
	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
		b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
				0x70);
		b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
				0x70);
	}

	b43_phy_write(dev, 0x224, 0x03eb);
	b43_phy_write(dev, 0x225, 0x03eb);
	b43_phy_write(dev, 0x226, 0x0341);
	b43_phy_write(dev, 0x227, 0x0341);
	b43_phy_write(dev, 0x228, 0x042b);
	b43_phy_write(dev, 0x229, 0x042b);
	b43_phy_write(dev, 0x22a, 0x0381);
	b43_phy_write(dev, 0x22b, 0x0381);
	b43_phy_write(dev, 0x22c, 0x042b);
	b43_phy_write(dev, 0x22d, 0x042b);
	b43_phy_write(dev, 0x22e, 0x0381);
	b43_phy_write(dev, 0x22f, 0x0381);
}

/* PHY workarounds for rev1/rev2 hardware. */
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;

	u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
	u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
	u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
	u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };

	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
	    nphy->band5g_pwrgain) {
		b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
		b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
	} else {
		b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
		b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
	}

	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
	b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
	b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);

	if (dev->phy.rev < 2) {
		b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
		b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
		b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
		b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
		b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
		b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
	}

	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);

	/* Board-specific RF-sequence delay tweak. */
	if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
	    dev->dev->board_type == 0x8B) {
		delays1[0] = 0x1;
		delays1[5] = 0x14;
	}
	b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
	b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);

	b43_nphy_gain_ctl_workarounds(dev);

	if (dev->phy.rev < 2) {
		if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
			b43_hf_write(dev, b43_hf_read(dev) |
					B43_HF_MLADVW);
	} else if (dev->phy.rev == 2) {
		b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
		b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
	}

	if (dev->phy.rev < 2)
		b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
				~B43_NPHY_SCRAM_SIGCTL_SCM);

	/* Set phase track alpha and beta */
	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);

	b43_phy_mask(dev, B43_NPHY_PIL_DW1,
			~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);

	if (dev->phy.rev == 2)
		b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
				B43_NPHY_FINERX2_CGC_DECGC);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
static void b43_nphy_workarounds(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
		b43_nphy_classifier(dev, 1, 0);
	else
		b43_nphy_classifier(dev, 1, 1);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	b43_phy_set(dev, B43_NPHY_IQFLIP,
		    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);

	/* Dispatch to the revision-specific workaround set. */
	if (dev->phy.rev >= 3)
		b43_nphy_workarounds_rev3plus(dev);
	else
		b43_nphy_workarounds_rev1_2(dev);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/**************************************************
 * Tx/Rx common
 **************************************************/

/*
 * Transmits a known value for LO calibration
 * 
http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
 */
static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
			    bool iqmode, bool dac_test)
{
	/* Load the tone samples; zero sample count means the load failed. */
	u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
	if (samp == 0)
		return -1;
	b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
	return 0;
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	bool override = false;
	u16 chain = 0x33;

	/* txrx_chain 0/1 forces single-chain operation on that core;
	 * any other value keeps both chains enabled (0x33). */
	if (nphy->txrx_chain == 0) {
		chain = 0x11;
		override = true;
	} else if (nphy->txrx_chain == 1) {
		chain = 0x22;
		override = true;
	}

	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
			~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
			chain);

	if (override)
		b43_phy_set(dev, B43_NPHY_RFSEQMODE,
				B43_NPHY_RFSEQMODE_CAOVER);
	else
		b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
				~B43_NPHY_RFSEQMODE_CAOVER);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
static void b43_nphy_stop_playback(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u16 tmp;

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
	if (tmp & 0x1)
		b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
	else if (tmp & 0x2)
		b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);

	b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);

	/* Restore the baseband multiplier saved when playback started
	 * (bit 31 of bb_mult_save marks a pending saved value). */
	if (nphy->bb_mult_save & 0x80000000) {
		tmp = nphy->bb_mult_save & 0xFFFF;
		b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
		nphy->bb_mult_save = 0;
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
					struct nphy_txgains target,
					struct nphy_iqcal_params *params)
{
	int i, j, indx;
	u16 gain;

	if (dev->phy.rev >= 3) {
		params->txgm = target.txgm[core];
		params->pga = target.pga[core];
		params->pad = target.pad[core];
		params->ipa = target.ipa[core];
		params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
					(params->pad << 4) | (params->ipa);
		for (j = 0; j < 5; j++)
			params->ncorr[j] = 0x79;
	} else {
		/* Look up the gain in the calibration table; fall back to
		 * the last entry (index 8) if no exact match is found. */
		gain = (target.pad[core]) | (target.pga[core] << 4) |
			(target.txgm[core] << 8);

		indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
			1 : 0;
		for (i = 0; i < 9; i++)
			if (tbl_iqcal_gainparams[indx][i][0] == gain)
				break;
		i = min(i, 8);

		params->txgm = tbl_iqcal_gainparams[indx][i][1];
		params->pga = tbl_iqcal_gainparams[indx][i][2];
		params->pad = tbl_iqcal_gainparams[indx][i][3];
		params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
					(params->pad << 2);
		for (j = 0; j < 4; j++)
			params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
	}
}

/**************************************************
 * Tx and Rx
 **************************************************/

void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{/* TODO */
}

static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
{/* TODO */
}

static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
							bool ignore_tssi)
{/* TODO */
	return B43_TXPWR_RES_DONE;
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u8 i;
	u16 bmask, val, tmp;
	enum ieee80211_band band = b43_current_band(dev->wl);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	nphy->txpwrctrl = enable;
	if (!enable) {
		if (dev->phy.rev >= 3 &&
		    (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
		     (B43_NPHY_TXPCTL_CMD_COEFF |
		      B43_NPHY_TXPCTL_CMD_HWPCTLEN |
		      B43_NPHY_TXPCTL_CMD_PCTLEN))) {
			/* We disable enabled TX pwr ctl, save it's state */
			nphy->tx_pwr_idx[0] = b43_phy_read(dev,
						B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
			nphy->tx_pwr_idx[1] = b43_phy_read(dev,
						B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
		}

		/* Clear both 84-entry power tables. */
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
		for (i = 0; i < 84; i++)
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);

		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
		for (i = 0; i < 84; i++)
b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);

		tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
		if (dev->phy.rev >= 3)
			tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
		b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);

		if (dev->phy.rev >= 3) {
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
		} else {
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
		}

		if (dev->phy.rev == 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
				~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
		else if (dev->phy.rev < 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
				~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);

		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
			b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
	} else {
		b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
				    nphy->adj_pwr_tbl);
		b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
				    nphy->adj_pwr_tbl);

		bmask = B43_NPHY_TXPCTL_CMD_COEFF |
			B43_NPHY_TXPCTL_CMD_HWPCTLEN;
		/* wl does useless check for "enable" param here */
		val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
		if (dev->phy.rev >= 3) {
			bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
			if (val)
				val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
		}
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);

		if (band == IEEE80211_BAND_5GHZ) {
			b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
					~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
			if (dev->phy.rev > 1)
				b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
						~B43_NPHY_TXPCTL_INIT_PIDXI1,
						0x64);
		}

		if (dev->phy.rev >= 3) {
			/* 128 is the "no saved state" marker. */
			if (nphy->tx_pwr_idx[0] != 128 &&
			    nphy->tx_pwr_idx[1] != 128) {
				/* Recover TX pwr ctl state */
				b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
						~B43_NPHY_TXPCTL_CMD_INIT,
						nphy->tx_pwr_idx[0]);
				if (dev->phy.rev > 1)
					b43_phy_maskset(dev,
						B43_NPHY_TXPCTL_INIT,
						~0xff, nphy->tx_pwr_idx[1]);
			}
		}

		if (dev->phy.rev >= 3) {
			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
		} else {
			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
		}

		if (dev->phy.rev == 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
		else if (dev->phy.rev < 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);

		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
			b43_hf_write(dev, b43_hf_read(dev) &
					~B43_HF_TSSIRPSMW);

		if (b43_nphy_ipa(dev)) {
			b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
			b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
		}
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	u8 txpi[2], bbmult, i;
	u16 tmp, radio_gain, dac_gain;
	u16 freq = dev->phy.channel_freq;
	u32 txgain;
	/* u32 gaintbl; rev3+ */

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	/* Pick the TX power index per core: fixed values for newer PHY
	 * revisions, SPROM-provided per-band values for SPROM rev >= 4. */
	if (dev->phy.rev >= 7) {
		txpi[0] = txpi[1] = 30;
	} else if (dev->phy.rev >= 3) {
		txpi[0] = 40;
		txpi[1] = 40;
	} else if (sprom->revision < 4) {
		txpi[0] = 72;
		txpi[1] = 72;
	} else {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			txpi[0] = sprom->txpid2g[0];
			txpi[1] = sprom->txpid2g[1];
		} else if (freq >= 4900 && freq < 5100) {
			txpi[0] = sprom->txpid5gl[0];
			txpi[1] = sprom->txpid5gl[1];
		} else if (freq >= 5100 && freq < 5500) {
			txpi[0] = sprom->txpid5g[0];
			txpi[1] = sprom->txpid5g[1];
		} else if (freq >= 5500) {
			txpi[0] = sprom->txpid5gh[0];
			txpi[1] = sprom->txpid5gh[1];
		} else {
			txpi[0] = 91;
			txpi[1] = 91;
		}
	}
	if (dev->phy.rev < 7 &&
	    (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
		txpi[0] = txpi[1] = 91;

	/*
	for (i = 0; i < 2; i++) {
		nphy->txpwrindex[i].index_internal = txpi[i];
		nphy->txpwrindex[i].index_internal_save = txpi[i];
	}
	*/

	for (i = 0; i < 2; i++) {
		txgain = *(b43_nphy_get_tx_gain_table(dev) + txpi[i]);

		if (dev->phy.rev >= 3)
			radio_gain = (txgain >> 16) & 0x1FFFF;
		else
			radio_gain = (txgain >> 16) & 0x1FFF;

		if (dev->phy.rev >= 7)
			dac_gain = (txgain >> 8) & 0x7;
		else
			dac_gain = (txgain >> 8) & 0x3F;
		bbmult = txgain & 0xFF;

		if (dev->phy.rev >= 3) {
			if (i == 0)
				b43_phy_set(dev, B43_NPHY_AFECTL_OVER1,
						0x0100);
			else
				b43_phy_set(dev, B43_NPHY_AFECTL_OVER,
						0x0100);
		} else {
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
		}

		if (i == 0)
			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
		else
			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);

		b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);

		/* bbmult shares one 16-bit table entry: core 0 in the high
		 * byte, core 1 in the low byte. */
		tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
		if (i == 0)
			tmp = (tmp & 0x00FF) | (bbmult << 8);
		else
			tmp = (tmp & 0xFF00) | bbmult;
		b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);

		if (b43_nphy_ipa(dev)) {
			u32 tmp32;
			u16 reg = (i == 0) ?
				B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
			tmp32 = b43_ntab_read(dev,
					B43_NTAB32(26 + i, 576 + txpi[i]));
			b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
			b43_phy_set(dev, reg, 0x4);
		}
	}

	b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/* Program the internal TSSI feedback paths for boards with an internal PA. */
static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	u8 core;
	u16 r; /* routing */

	if (phy->rev >= 7) {
		for (core = 0; core < 2; core++) {
			r = core ?
0x190 : 0x170;
			if (b43_current_band(dev->wl) ==
			    IEEE80211_BAND_2GHZ) {
				b43_radio_write(dev, r + 0x5, 0x5);
				b43_radio_write(dev, r + 0x9, 0xE);
				if (phy->rev != 5)
					b43_radio_write(dev, r + 0xA, 0);
				if (phy->rev != 7)
					b43_radio_write(dev, r + 0xB, 1);
				else
					b43_radio_write(dev, r + 0xB, 0x31);
			} else {
				b43_radio_write(dev, r + 0x5, 0x9);
				b43_radio_write(dev, r + 0x9, 0xC);
				b43_radio_write(dev, r + 0xB, 0x0);
				if (phy->rev != 5)
					b43_radio_write(dev, r + 0xA, 1);
				else
					b43_radio_write(dev, r + 0xA, 0x31);
			}
			b43_radio_write(dev, r + 0x6, 0);
			b43_radio_write(dev, r + 0x7, 0);
			b43_radio_write(dev, r + 0x8, 3);
			b43_radio_write(dev, r + 0xC, 0);
		}
	} else {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
		else
			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
		b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0);
		b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29);

		for (core = 0; core < 2; core++) {
			r = core ? B2056_TX1 : B2056_TX0;

			b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0);
			b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0);
			b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3);
			b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0);
			b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
			b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
			b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);

			if (b43_current_band(dev->wl) ==
			    IEEE80211_BAND_2GHZ) {
				b43_radio_write(dev,
						r | B2056_TX_TX_SSI_MASTER,
						0x5);
				if (phy->rev != 5)
					b43_radio_write(dev,
							r | B2056_TX_TSSIA,
							0x00);
				if (phy->rev >= 5)
					b43_radio_write(dev,
							r | B2056_TX_TSSIG,
							0x31);
				else
					b43_radio_write(dev,
							r | B2056_TX_TSSIG,
							0x11);
				b43_radio_write(dev,
						r | B2056_TX_TX_SSI_MUX,
						0xE);
			} else {
				b43_radio_write(dev,
						r | B2056_TX_TX_SSI_MASTER,
						0x9);
				b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31);
				b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0);
				b43_radio_write(dev,
						r | B2056_TX_TX_SSI_MUX,
						0xC);
			}
		}
	}
}

/*
 * Stop radio and transmit known signal. Then check received signal strength
 * to get TSSI (Transmit Signal Strength Indicator).
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
 */
static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = dev->phy.n;

	u32 tmp;
	s32 rssi[4] = { };

	/* TODO: check if we can transmit */

	if (b43_nphy_ipa(dev))
		b43_nphy_ipa_internal_tssi_setup(dev);

	if (phy->rev >= 7)
		; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
	else if (phy->rev >= 3)
		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);

	/* Play a tone and sample the RSSI to capture the idle TSSI. */
	b43_nphy_stop_playback(dev);
	b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
	udelay(20);
	tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1);
	b43_nphy_stop_playback(dev);
	b43_nphy_rssi_select(dev, 0, 0);

	if (phy->rev >= 7)
		; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
	else if (phy->rev >= 3)
		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);

	if (phy->rev >= 3) {
		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
		nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
	} else {
		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
		nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
	}
	nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
	nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
}

/* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u8 idx, delta;
	u8 i, stf_mode;

	for (i = 0; i < 4; i++)
		nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];

	/* For each STF mode pick the starting offset index in
	 * tx_power_offset[]; 40 MHz channels use different bases. */
	for (stf_mode = 0; stf_mode < 4; stf_mode++) {
		delta = 0;
		switch (stf_mode) {
		case 0:
			if (dev->phy.is_40mhz && dev->phy.rev >= 5) {
				idx = 68;
			} else {
				delta = 1;
				idx = dev->phy.is_40mhz ? 52 : 4;
			}
			break;
		case 1:
			idx = dev->phy.is_40mhz ? 76 : 28;
			break;
		case 2:
			idx = dev->phy.is_40mhz ? 84 : 36;
			break;
		case 3:
			idx = dev->phy.is_40mhz ?
92 : 44;
			break;
		}

		for (i = 0; i < 20; i++) {
			nphy->adj_pwr_tbl[4 + 4 * i + stf_mode] =
				nphy->tx_power_offset[idx];
			if (i == 0)
				idx += delta;
			if (i == 14)
				idx += 1 - delta;
			if (i == 3 || i == 4 || i == 7 ||
			    i == 8 || i == 11 || i == 13)
				idx += 1;
		}
	}
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	s16 a1[2], b0[2], b1[2];
	u8 idle[2];
	s8 target[2];
	s32 num, den, pwr;
	u32 regval[64];

	u16 freq = dev->phy.channel_freq;
	u16 tmp;
	u16 r; /* routing */
	u8 i, c;

	/* Core revs 11/12 need the MAC control bit set (and the register
	 * read back) around PHY register access here. */
	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0, 0x200000);
		b43_read32(dev, B43_MMIO_MACCTL);
		udelay(1);
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, true);

	b43_phy_set(dev, B43_NPHY_TSSIMODE, B43_NPHY_TSSIMODE_EN);
	if (dev->phy.rev >= 3)
		b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD,
			     ~B43_NPHY_TXPCTL_CMD_PCTLEN & 0xFFFF);
	else
		b43_phy_set(dev, B43_NPHY_TXPCTL_CMD,
			    B43_NPHY_TXPCTL_CMD_PCTLEN);

	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12)
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0x200000, 0);

	/* Pick idle TSSI, target power and PA coefficients (a1, b0, b1)
	 * per core, from SPROM when available, else fixed defaults. */
	if (sprom->revision < 4) {
		idle[0] = nphy->pwr_ctl_info[0].idle_tssi_2g;
		idle[1] = nphy->pwr_ctl_info[1].idle_tssi_2g;
		target[0] = target[1] = 52;
		a1[0] = a1[1] = -424;
		b0[0] = b0[1] = 5612;
		b1[0] = b1[1] = -1393;
	} else {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
				target[c] = sprom->core_pwr_info[c].maxpwr_2g;
				a1[c] = sprom->core_pwr_info[c].pa_2g[0];
				b0[c] = sprom->core_pwr_info[c].pa_2g[1];
				b1[c] = sprom->core_pwr_info[c].pa_2g[2];
			}
		} else if (freq >= 4900 && freq < 5100) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
				target[c] = sprom->core_pwr_info[c].maxpwr_5gl;
				a1[c] = sprom->core_pwr_info[c].pa_5gl[0];
				b0[c] = sprom->core_pwr_info[c].pa_5gl[1];
				b1[c] = sprom->core_pwr_info[c].pa_5gl[2];
			}
		} else if (freq >= 5100 && freq < 5500) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
				target[c] = sprom->core_pwr_info[c].maxpwr_5g;
				a1[c] = sprom->core_pwr_info[c].pa_5g[0];
				b0[c] = sprom->core_pwr_info[c].pa_5g[1];
				b1[c] = sprom->core_pwr_info[c].pa_5g[2];
			}
		} else if (freq >= 5500) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
				target[c] = sprom->core_pwr_info[c].maxpwr_5gh;
				a1[c] = sprom->core_pwr_info[c].pa_5gh[0];
				b0[c] = sprom->core_pwr_info[c].pa_5gh[1];
				b1[c] = sprom->core_pwr_info[c].pa_5gh[2];
			}
		} else {
			idle[0] = nphy->pwr_ctl_info[0].idle_tssi_5g;
			idle[1] = nphy->pwr_ctl_info[1].idle_tssi_5g;
			target[0] = target[1] = 52;
			a1[0] = a1[1] = -424;
			b0[0] = b0[1] = 5612;
			b1[0] = b1[1] = -1393;
		}
	}
	/* target[0] = target[1] = nphy->tx_power_max; */

	if (dev->phy.rev >= 3) {
		if (sprom->fem.ghz2.tssipos)
			b43_phy_set(dev, B43_NPHY_TXPCTL_ITSSI, 0x4000);
		if (dev->phy.rev >= 7) {
			for (c = 0; c < 2; c++) {
				r = c ? 0x190 : 0x170;
				if (b43_nphy_ipa(dev))
					b43_radio_write(dev, r + 0x9,
						(b43_current_band(dev->wl) ==
						IEEE80211_BAND_2GHZ) ?
						0xE : 0xC);
			}
		} else {
			if (b43_nphy_ipa(dev)) {
				tmp = (b43_current_band(dev->wl) ==
					IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
				b43_radio_write(dev,
					B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
				b43_radio_write(dev,
					B2056_TX1 | B2056_TX_TX_SSI_MUX, tmp);
			} else {
				b43_radio_write(dev,
					B2056_TX0 | B2056_TX_TX_SSI_MUX, 0x11);
				b43_radio_write(dev,
					B2056_TX1 | B2056_TX_TX_SSI_MUX, 0x11);
			}
		}
	}

	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0, 0x200000);
		b43_read32(dev, B43_MMIO_MACCTL);
		udelay(1);
	}

	if (dev->phy.rev >= 7) {
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
				~B43_NPHY_TXPCTL_CMD_INIT, 0x19);
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
				~B43_NPHY_TXPCTL_INIT_PIDXI1, 0x19);
	} else {
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
				~B43_NPHY_TXPCTL_CMD_INIT, 0x40);
		if (dev->phy.rev > 1)
			b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
				~B43_NPHY_TXPCTL_INIT_PIDXI1, 0x40);
	}

	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12)
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0x200000, 0);

	b43_phy_write(dev, B43_NPHY_TXPCTL_N,
		      0xF0 << B43_NPHY_TXPCTL_N_TSSID_SHIFT |
		      3 << B43_NPHY_TXPCTL_N_NPTIL2_SHIFT);
	b43_phy_write(dev, B43_NPHY_TXPCTL_ITSSI,
		      idle[0] << B43_NPHY_TXPCTL_ITSSI_0_SHIFT |
		      idle[1] << B43_NPHY_TXPCTL_ITSSI_1_SHIFT |
		      B43_NPHY_TXPCTL_ITSSI_BINF);
	b43_phy_write(dev, B43_NPHY_TXPCTL_TPWR,
		      target[0] << B43_NPHY_TXPCTL_TPWR_0_SHIFT |
		      target[1] << B43_NPHY_TXPCTL_TPWR_1_SHIFT);

	/* Compute the 64-entry estimated power table per core from the
	 * PA coefficients (rounded fixed-point, clamped at -8). */
	for (c = 0; c < 2; c++) {
		for (i = 0; i < 64; i++) {
			num = 8 * (16 * b0[c] + b1[c] * i);
			den = 32768 + a1[c] * i;
			pwr = max((4 * num + den / 2) / den, -8);
			if (dev->phy.rev < 3 && (i <= (31 - idle[c] + 1)))
				pwr = max(pwr, target[c] + 1);
			regval[i] = pwr;
		}
		b43_ntab_write_bulk(dev, B43_NTAB32(26 + c, 0), 64, regval);
	}

	b43_nphy_tx_prepare_adjusted_power_table(dev);
	/*
	b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
	b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
	*/

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, false);
}

/* Upload the TX gain table and per-index RF power offsets. */
static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	const u32
	*table = NULL;
	u32 rfpwr_offset;
	u8 pga_gain;
	int i;

	/* Upload the TX gain table for both cores */
	table = b43_nphy_get_tx_gain_table(dev);
	b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
	b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);

	if (phy->rev >= 3) {
#if 0
		nphy->gmval = (table[0] >> 16) & 0x7000;
#endif

		/* Derive a per-index RF power offset from the PGA gain bits */
		for (i = 0; i < 128; i++) {
			pga_gain = (table[i] >> 24) & 0xF;
			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
				rfpwr_offset =
				 b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
			else
				rfpwr_offset = 0; /* FIXME */
			b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
				       rfpwr_offset);
			b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
				       rfpwr_offset);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
{
	struct b43_phy_n *nphy = dev->phy.n;
	enum ieee80211_band band;
	u16 tmp;

	if (!enable) {
		/* Save the current INTC values so enabling can restore them */
		nphy->rfctrl_intc1_save = b43_phy_read(dev,
						       B43_NPHY_RFCTL_INTC1);
		nphy->rfctrl_intc2_save = b43_phy_read(dev,
						       B43_NPHY_RFCTL_INTC2);
		band = b43_current_band(dev->wl);
		if (dev->phy.rev >= 3) {
			if (band == IEEE80211_BAND_5GHZ)
				tmp = 0x600;
			else
				tmp = 0x480;
		} else {
			if (band == IEEE80211_BAND_5GHZ)
				tmp = 0x180;
			else
				tmp = 0x120;
		}
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
	} else {
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
				nphy->rfctrl_intc1_save);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
				nphy->rfctrl_intc2_save);
	}
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
{
	u16 tmp;

	if (dev->phy.rev >= 3) {
		if (b43_nphy_ipa(dev)) {
			/* The register packs the same 3-bit value 4 times */
			tmp = 4;
			b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
		}
		tmp = 1;
		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
	}
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
				u16 samps, u8 time, bool wait)
{
	int i;
	u16 tmp;

	b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
	b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
	if (wait)
		b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
	else
		b43_phy_mask(dev, B43_NPHY_IQEST_CMD,
				~B43_NPHY_IQEST_CMD_MODE);

	b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);

	/* Poll for completion (up to ~10ms), then latch the accumulators */
	for (i = 1000; i; i--) {
		tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
		if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
			est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
			est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
			est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
			est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
			est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
			est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
			return;
		}
		udelay(10);
	}
	/* Timed out - report all-zero estimates */
	memset(est, 0, sizeof(*est));
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
					struct b43_phy_n_iq_comp *pcomp)
{
	if (write) {
		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
	} else {
		pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
		pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
		pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
		pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
	}
}

#if 0
/* Ready but not used anywhere */
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
{
	u16
	/* Restore the PHY registers saved by b43_nphy_rx_cal_phy_setup() */
	*regs = dev->phy.n->tx_rx_cal_phy_saveregs;

	b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
	if (core == 0) {
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
	} else {
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
	}
	b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
	b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
	b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
	b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
	b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
	b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
{
	u8 rxval, txval;
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;

	/* Save all registers touched below for the cleanup counterpart */
	regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
	if (core == 0) {
		regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
		regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
	} else {
		regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
		regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
	}
	regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
	regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
	regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
	regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
	regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
	regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
	regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
	regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);

	b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
	b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);

	/* Disable RX / enable TX on the *other* core, enable RX on "core" */
	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
			~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
			((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
			((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
			(core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
			(core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));

	if (core == 0) {
		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
	} else {
		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
	}

	b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
	b43_nphy_rf_control_override(dev, 8, 0, 3, false);
	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);

	if (core == 0) {
		rxval = 1;
		txval = 8;
	} else {
		rxval = 4;
		txval = 2;
	}
	b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
	b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
}
#endif

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
{
	int i;
	s32 iq;
	u32 ii;
	u32 qq;
	int iq_nbits, qq_nbits;
	int arsh, brsh;
	u16 tmp, a, b;

	struct nphy_iq_est est;
	struct b43_phy_n_iq_comp old;
	struct b43_phy_n_iq_comp new = { };
	bool error = false;

	if (mask == 0)
		return;

	/* Save the current coefficients and measure with them zeroed */
	b43_nphy_rx_iq_coeffs(dev, false, &old);
	b43_nphy_rx_iq_coeffs(dev, true, &new);
	b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
	new = old;

	for (i = 0; i < 2; i++) {
		if (i == 0 && (mask & 1)) {
			iq = est.iq0_prod;
			ii = est.i0_pwr;
			qq = est.q0_pwr;
		} else if (i == 1 && (mask & 2)) {
			iq = est.iq1_prod;
			ii = est.i1_pwr;
			qq = est.q1_pwr;
		} else {
			continue;
		}

		if (ii + qq < 2) {
			error = true;
			break;
		}

		/* Fixed-point computation of the a/b compensation values */
		iq_nbits = fls(abs(iq));
		qq_nbits = fls(qq);

		arsh = iq_nbits - 20;
		if (arsh >= 0) {
			a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
			tmp = ii >> arsh;
		} else {
			a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
			tmp = ii << -arsh;
		}
		if (tmp == 0) {
			error = true;
			break;
		}
		a /= tmp;

		brsh = qq_nbits - 11;
		if (brsh >= 0) {
			b = (qq << (31 - qq_nbits));
			tmp = ii >> brsh;
		} else {
			b = (qq << (31 - qq_nbits));
			tmp = ii << -brsh;
		}
		if (tmp == 0) {
			error = true;
			break;
		}
		b = int_sqrt(b / tmp - a * a) - (1 << 10);

		if (i == 0
		    && (mask & 0x1)) {
			/* Below rev3 the a/b coefficients are swapped */
			if (dev->phy.rev >= 3) {
				new.a0 = a & 0x3FF;
				new.b0 = b & 0x3FF;
			} else {
				new.a0 = b & 0x3FF;
				new.b0 = a & 0x3FF;
			}
		} else if (i == 1 && (mask & 0x2)) {
			if (dev->phy.rev >= 3) {
				new.a1 = a & 0x3FF;
				new.b1 = b & 0x3FF;
			} else {
				new.a1 = b & 0x3FF;
				new.b1 = a & 0x3FF;
			}
		}
	}

	if (error)
		new = old; /* keep the previous coefficients on failure */

	b43_nphy_rx_iq_coeffs(dev, true, &new);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
{
	u16 array[4];

	/* Mirror the TX IQ cal coefficients into shared memory for the ucode */
	b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);

	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u8 channel = dev->phy.channel;
	int tone[2] = { 57, 58 };
	u32 noise[2] = { 0x3FF, 0x3FF };

	B43_WARN_ON(dev->phy.rev < 3);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	if (nphy->gband_spurwar_en) {
		/* TODO: N PHY Adjust Analog Pfbw (7) */
		if (channel == 11 && dev->phy.is_40mhz)
			; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
		else
			; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
		/* TODO: N PHY Adjust CRS Min Power (0x1E) */
	}

	if (nphy->aband_spurwar_en) {
		/* Per-channel spur tone/noise selection for the A band */
		if (channel == 54) {
			tone[0] = 0x20;
			noise[0] = 0x25F;
		} else if (channel == 38 || channel == 102 || channel == 118) {
			if (0 /* FIXME */) {
				tone[0] = 0x20;
				noise[0] = 0x21F;
			} else {
				tone[0] = 0;
				noise[0] = 0;
			}
		} else if (channel == 134) {
			tone[0] = 0x20;
			noise[0] = 0x21F;
		} else if (channel == 151) {
			tone[0] = 0x10;
			noise[0] = 0x23F;
		} else if (channel == 153 || channel == 161) {
			tone[0] = 0x30;
			noise[0] = 0x23F;
		} else {
			tone[0] = 0;
			noise[0] = 0;
		}

		if (!tone[0] && !noise[0])
			; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
		else
			; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	int i, j;
	u32 tmp;
	u32 cur_real, cur_imag, real_part, imag_part;

	u16 buffer[7];

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, true);

	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);

	/* Replicate the packed IQ coefficients 128 times per core table */
	for (i = 0; i < 2; i++) {
		tmp = ((buffer[i * 2] & 0x3FF) << 10) |
			(buffer[i * 2 + 1] & 0x3FF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				(((i + 26) << 10) | 320));
		for (j = 0; j < 128; j++) {
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
					((tmp >> 16) & 0xFFFF));
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(tmp & 0xFFFF));
		}
	}

	for (i = 0; i < 2; i++) {
		tmp = buffer[5 + i];
		real_part = (tmp >> 8) & 0xFF;
		imag_part = (tmp & 0xFF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				(((i + 26) << 10) | 448));

		if (dev->phy.rev >= 3) {
			cur_real = real_part;
			cur_imag = imag_part;
			tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
		}

		for (j = 0; j < 128; j++) {
			/* Pre-rev3 scales the LO coefficients per entry */
			if (dev->phy.rev < 3) {
				cur_real = (real_part * loscale[j] + 128) >> 8;
				cur_imag = (imag_part * loscale[j] + 128) >> 8;
				tmp = ((cur_real & 0xFF) << 8) |
					(cur_imag & 0xFF);
			}
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
					((tmp >> 16) & 0xFFFF));
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(tmp & 0xFFFF));
		}
	}

	if (dev->phy.rev >= 3) {
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, false);
}

/*
 * Restore RSSI Calibration
 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
 */
static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 *rssical_radio_regs = NULL;
	u16 *rssical_phy_regs = NULL;

	if
	    (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		/* Nothing to restore before the first cal on this band */
		if (!nphy->rssical_chanspec_2G.center_freq)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
	} else {
		if (!nphy->rssical_chanspec_5G.center_freq)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
	}

	/* TODO use some definitions */
	b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
	b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);

	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u16 *save = nphy->tx_rx_cal_radio_saveregs;
	u16 tmp;
	u8 offset, i;

	if (dev->phy.rev >= 3) {
		/* Two cores: register banks at 0x2000 (core 0) / 0x3000 */
		for (i = 0; i < 2; i++) {
			tmp = (i == 0) ? 0x2000 : 0x3000;
			offset = i * 11;

			save[offset + 0] = b43_radio_read16(dev,
							B2055_CAL_RVARCTL);
			save[offset + 1] = b43_radio_read16(dev,
							B2055_CAL_LPOCTL);
			save[offset + 2] = b43_radio_read16(dev,
							B2055_CAL_TS);
			save[offset + 3] = b43_radio_read16(dev,
							B2055_CAL_RCCALRTS);
			save[offset + 4] = b43_radio_read16(dev,
							B2055_CAL_RCALRTS);
			save[offset + 5] = b43_radio_read16(dev,
							B2055_PADDRV);
			save[offset + 6] = b43_radio_read16(dev,
							B2055_XOCTL1);
			save[offset + 7] = b43_radio_read16(dev,
							B2055_XOCTL2);
			save[offset + 8] = b43_radio_read16(dev,
							B2055_XOREGUL);
			save[offset + 9] = b43_radio_read16(dev,
							B2055_XOMISC);
			save[offset + 10] = b43_radio_read16(dev,
							B2055_PLL_LFC1);

			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
				b43_radio_write16(dev,
					tmp | B2055_CAL_RVARCTL, 0x0A);
				b43_radio_write16(dev,
					tmp | B2055_CAL_LPOCTL, 0x40);
				b43_radio_write16(dev,
					tmp | B2055_CAL_TS, 0x55);
				b43_radio_write16(dev,
					tmp | B2055_CAL_RCCALRTS, 0);
				b43_radio_write16(dev,
					tmp | B2055_CAL_RCALRTS, 0);
				if (nphy->ipa5g_on) {
					b43_radio_write16(dev,
						tmp | B2055_PADDRV, 4);
					b43_radio_write16(dev,
						tmp | B2055_XOCTL1, 1);
				} else {
					b43_radio_write16(dev,
						tmp | B2055_PADDRV, 0);
					b43_radio_write16(dev,
						tmp | B2055_XOCTL1, 0x2F);
				}
				b43_radio_write16(dev,
					tmp | B2055_XOCTL2, 0);
			} else {
				b43_radio_write16(dev,
					tmp | B2055_CAL_RVARCTL, 0x06);
				b43_radio_write16(dev,
					tmp | B2055_CAL_LPOCTL, 0x40);
				b43_radio_write16(dev,
					tmp | B2055_CAL_TS, 0x55);
				b43_radio_write16(dev,
					tmp | B2055_CAL_RCCALRTS, 0);
				b43_radio_write16(dev,
					tmp | B2055_CAL_RCALRTS, 0);
				b43_radio_write16(dev,
					tmp | B2055_XOCTL1, 0);
				if (nphy->ipa2g_on) {
					b43_radio_write16(dev,
						tmp | B2055_PADDRV, 6);
					b43_radio_write16(dev,
						tmp | B2055_XOCTL2,
						(dev->phy.rev < 5) ?
0x11 : 0x01); } else { b43_radio_write16(dev, tmp | B2055_PADDRV, 0); b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); } } b43_radio_write16(dev, tmp | B2055_XOREGUL, 0); b43_radio_write16(dev, tmp | B2055_XOMISC, 0); b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0); } } else { save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1); b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29); save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2); b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54); save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1); b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29); save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2); b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54); save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX); save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX); if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ)) { b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04); b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04); } else { b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20); b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20); } if (dev->phy.rev < 2) { b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20); b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20); } else { b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20); b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) { struct b43_phy_n *nphy = dev->phy.n; int i; u16 scale, entry; u16 tmp = nphy->txcal_bbmult; if (core == 0) tmp >>= 8; tmp &= 0xff; for (i = 0; i < 18; i++) { scale = (ladder_lo[i].percent * tmp) / 100; entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env; b43_ntab_write(dev, B43_NTAB16(15, i), entry); scale = (ladder_iq[i].percent * tmp) / 100; entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env; b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry); } } /* 
http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
	int i;

	/* Program the 15 unknown-purpose filter coefficients at 0x2C5+ */
	for (i = 0; i < 15; i++)
		b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
				tbl_tx_filter_coef_rev4[2][i]);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
	int i, j;
	/* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
	static const u16 offset[] = { 0x186, 0x195, 0x2C5 };

	for (i = 0; i < 3; i++)
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[i] + j),
					tbl_tx_filter_coef_rev4[i][j]);

	/* Overwrite the first bank with bandwidth/band specific values */
	if (dev->phy.is_40mhz) {
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
					tbl_tx_filter_coef_rev4[3][j]);
	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
					tbl_tx_filter_coef_rev4[5][j]);
	}

	/* Channel 14 (2.4GHz, Japan) gets its own coefficients */
	if (dev->phy.channel == 14)
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
					tbl_tx_filter_coef_rev4[6][j]);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 curr_gain[2];
	struct nphy_txgains target;
	const u32 *table = NULL;

	if (!nphy->txpwrctrl) {
		/* TX power control off: unpack the current gains directly */
		int i;

		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, true);
		b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, false);

		for (i = 0; i < 2; ++i) {
			if (dev->phy.rev >= 3) {
				target.ipa[i] = curr_gain[i] & 0x000F;
				target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
				target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
				target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
			} else {
				target.ipa[i] = curr_gain[i] & 0x0003;
				target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
				target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
				target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
			}
		}
	} else {
		/* TX power control on: look the gains up via the pctl index */
		int i;
		u16 index[2];
		index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
		index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;

		for (i = 0; i < 2; ++i) {
			table = b43_nphy_get_tx_gain_table(dev);
			if (dev->phy.rev >= 3) {
				target.ipa[i] = (table[index[i]] >> 16) & 0xF;
				target.pad[i] = (table[index[i]] >> 20) & 0xF;
				target.pga[i] = (table[index[i]] >> 24) & 0xF;
				target.txgm[i] = (table[index[i]] >> 28) & 0xF;
			} else {
				target.ipa[i] = (table[index[i]] >> 16) & 0x3;
				target.pad[i] = (table[index[i]] >> 18) & 0x3;
				target.pga[i] = (table[index[i]] >> 20) & 0x7;
				target.txgm[i] = (table[index[i]] >> 23) & 0x7;
			}
		}
	}

	return target;
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;

	/* Restore everything saved by b43_nphy_tx_cal_phy_setup() */
	if (dev->phy.rev >= 3) {
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
		b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
		b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
		b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
		b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
		b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
		b43_nphy_reset_cca(dev);
	} else {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
		b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
		b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
	}
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
static void b43_nphy_tx_cal_phy_setup(struct
			b43_wldev *dev)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
	u16 tmp;

	/* Save and override the PHY state needed for TX IQ/LO calibration;
	 * b43_nphy_tx_cal_phy_cleanup() undoes all of this. */
	regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
	regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
	if (dev->phy.rev >= 3) {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
		regs[2] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		regs[3] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);

		regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
		b43_phy_mask(dev, B43_NPHY_BBCFG,
				~B43_NPHY_BBCFG_RSTRX & 0xFFFF);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
		regs[5] = tmp;
		b43_ntab_write(dev, B43_NTAB16(8, 3), 0);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
		regs[6] = tmp;
		b43_ntab_write(dev, B43_NTAB16(8, 19), 0);
		regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
		regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);

		b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
		b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
		b43_nphy_rf_control_intc_override(dev, 1, 8, 2);

		regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
		regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
		b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
		b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
	} else {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		regs[2] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);
		tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
		regs[3] = tmp;
		tmp |= 0x2000;
		b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);
		tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
		regs[4] = tmp;
		tmp |= 0x2000;
		b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
		regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
		regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
			tmp = 0x0180;
		else
			tmp = 0x0120;
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
	}
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
static void b43_nphy_save_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
	u16 *txcal_radio_regs = NULL;
	struct b43_chanspec *iqcal_chanspec;
	u16 *table = NULL;

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	/* Cache calibration results per band for later restore */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
		iqcal_chanspec = &nphy->iqcal_chanspec_2G;
		table = nphy->cal_cache.txcal_coeffs_2G;
	} else {
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
		iqcal_chanspec = &nphy->iqcal_chanspec_5G;
		table = nphy->cal_cache.txcal_coeffs_5G;
	}

	b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
	/* TODO use some definitions */
	if (dev->phy.rev >= 3) {
		txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
		txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
		txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
		txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
		txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
		txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
		txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
		txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
	} else {
		txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
		txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
		txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
		txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
	}
	iqcal_chanspec->center_freq = dev->phy.channel_freq;
	iqcal_chanspec->channel_type = dev->phy.channel_type;
	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 8, table);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
static void b43_nphy_restore_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 coef[4];
	u16 *loft =
	       NULL;
	u16 *table = NULL;

	int i;
	u16 *txcal_radio_regs = NULL;
	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		/* Nothing cached yet for this band: bail out */
		if (!nphy->iqcal_chanspec_2G.center_freq)
			return;
		table = nphy->cal_cache.txcal_coeffs_2G;
		loft = &nphy->cal_cache.txcal_coeffs_2G[5];
	} else {
		if (!nphy->iqcal_chanspec_5G.center_freq)
			return;
		table = nphy->cal_cache.txcal_coeffs_5G;
		loft = &nphy->cal_cache.txcal_coeffs_5G[5];
	}

	b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);

	/* NOTE(review): for rev >= 3 this copies the *uninitialized* local
	 * coef[] into the cached table; for rev < 3 it zeroes coef[] for the
	 * write below. Looks like a latent bug - compare against the specs
	 * and upstream before relying on the rev >= 3 path. */
	for (i = 0; i < 4; i++) {
		if (dev->phy.rev >= 3)
			table[i] = coef[i];
		else
			coef[i] = 0;
	}

	b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
	b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
	b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);

	if (dev->phy.rev < 2)
		b43_nphy_tx_iq_workaround(dev);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
	} else {
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
	}

	/* TODO use some definitions */
	if (dev->phy.rev >= 3) {
		b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
		b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
		b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
		b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
		b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
		b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
		b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
		b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
	} else {
		b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
		b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
		b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
		b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
	}
	b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
				struct nphy_txgains target,
				bool full, bool mphase)
{
	struct b43_phy_n
	/* TX IQ/LO calibration - highly order-sensitive command sequence */
	*nphy = dev->phy.n;
	int i;
	int error = 0;
	int freq;
	bool avoid = false;
	u8 length;
	u16 tmp, core, type, count, max, numb, last = 0, cmd;
	const u16 *table;
	bool phy6or5x;

	u16 buffer[11];
	u16 diq_start = 0;
	u16 save[2];
	u16 gain[2];
	struct nphy_iqcal_params params[2];
	bool updated[2] = { };

	b43_nphy_stay_in_carrier_search(dev, true);

	if (dev->phy.rev >= 4) {
		/* Temporarily disable hang avoidance during the cal */
		avoid = nphy->hang_avoid;
		nphy->hang_avoid = false;
	}

	b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);

	for (i = 0; i < 2; i++) {
		b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
		gain[i] = params[i].cal_gain;
	}

	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);

	b43_nphy_tx_cal_radio_setup(dev);
	b43_nphy_tx_cal_phy_setup(dev);

	phy6or5x = dev->phy.rev >= 6 ||
		(dev->phy.rev == 5 && nphy->ipa2g_on &&
		b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
	if (phy6or5x) {
		if (dev->phy.is_40mhz) {
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
					tbl_tx_iqlo_cal_loft_ladder_40);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
					tbl_tx_iqlo_cal_iqimb_ladder_40);
		} else {
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
					tbl_tx_iqlo_cal_loft_ladder_20);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
					tbl_tx_iqlo_cal_iqimb_ladder_20);
		}
	}

	b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);

	if (!dev->phy.is_40mhz)
		freq = 2500;
	else
		freq = 5000;

	/* Generate the calibration tone (or replay samples in mphase) */
	if (nphy->mphase_cal_phase_id > 2)
		b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
					0xFFFF, 0, true, false);
	else
		error = b43_nphy_tx_tone(dev, freq, 250, true, false);

	if (error == 0) {
		/* Pick starting coefficients: previous results if valid,
		 * otherwise the table of defaults (forces a full cal). */
		if (nphy->mphase_cal_phase_id > 2) {
			table = nphy->mphase_txcal_bestcoeffs;
			length = 11;
			if (dev->phy.rev < 3)
				length -= 2;
		} else {
			if (!full && nphy->txiqlocal_coeffsvalid) {
				table = nphy->txiqlocal_bestc;
				length = 11;
				if (dev->phy.rev < 3)
					length -= 2;
			} else {
				full = true;
				if (dev->phy.rev >= 3) {
					table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
					length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
				} else {
					table = tbl_tx_iqlo_cal_startcoefs;
					length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
				}
			}
		}

		b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);

		if (full) {
			if (dev->phy.rev >= 3)
				max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
			else
				max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
		} else {
			if (dev->phy.rev >= 3)
				max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
			else
				max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
		}

		if (mphase) {
			/* Multi-phase: only run a slice of the command list */
			count = nphy->mphase_txcal_cmdidx;
			numb = min(max,
				(u16)(count + nphy->mphase_txcal_numcmds));
		} else {
			count = 0;
			numb = max;
		}

		for (; count < numb; count++) {
			if (full) {
				if (dev->phy.rev >= 3)
					cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
				else
					cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
			} else {
				if (dev->phy.rev >= 3)
					cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
				else
					cmd = tbl_tx_iqlo_cal_cmds_recal[count];
			}

			core = (cmd & 0x3000) >> 12;
			type = (cmd & 0x0F00) >> 8;

			if (phy6or5x && updated[core] == 0) {
				b43_nphy_update_tx_cal_ladder(dev, core);
				updated[core] = true;
			}

			tmp = (params[core].ncorr[type] << 8) | 0x66;
			b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);

			if (type == 1 || type == 3 || type == 4) {
				/* Zero the DIQ entry while running this cmd */
				buffer[0] = b43_ntab_read(dev,
						B43_NTAB16(15, 69 + core));
				diq_start = buffer[0];
				buffer[0] = 0;
				b43_ntab_write(dev, B43_NTAB16(15, 69 + core),
						0);
			}

			b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
			/* Wait for the hardware to finish the command */
			for (i = 0; i < 2000; i++) {
				tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
				if (tmp & 0xC000)
					break;
				udelay(10);
			}

			b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
						buffer);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
						buffer);

			if (type == 1 || type == 3 || type == 4)
				buffer[0] = diq_start;
		}

		if (mphase)
			nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;

		last = (dev->phy.rev < 3) ? 6 : 7;

		if (!mphase || nphy->mphase_cal_phase_id == last) {
			/* Final phase: commit and remember the best coeffs */
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer);
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer);
			if (dev->phy.rev < 3) {
				buffer[0] = 0;
				buffer[1] = 0;
				buffer[2] = 0;
				buffer[3] = 0;
			}
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
						buffer);
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
						buffer);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
						buffer);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
						buffer);
			length = 11;
			if (dev->phy.rev < 3)
				length -= 2;
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
						nphy->txiqlocal_bestc);
			nphy->txiqlocal_coeffsvalid = true;
			nphy->txiqlocal_chanspec.center_freq =
						dev->phy.channel_freq;
			nphy->txiqlocal_chanspec.channel_type =
						dev->phy.channel_type;
		} else {
			/* Intermediate phase: stash results for the next one */
			length = 11;
			if (dev->phy.rev < 3)
				length -= 2;
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
						nphy->mphase_txcal_bestcoeffs);
		}

		b43_nphy_stop_playback(dev);
		b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
	}

	b43_nphy_tx_cal_phy_cleanup(dev);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);

	if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
		b43_nphy_tx_iq_workaround(dev);

	if (dev->phy.rev >= 4)
		nphy->hang_avoid = avoid;

	b43_nphy_stay_in_carrier_search(dev, false);

	return error;
}

/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u8 i;
	u16 buffer[7];
	bool equal = true;

	/* Only reapply if the stored coeffs match the current channel */
	if (!nphy->txiqlocal_coeffsvalid ||
	    nphy->txiqlocal_chanspec.center_freq != dev->phy.channel_freq ||
	    nphy->txiqlocal_chanspec.channel_type != dev->phy.channel_type)
		return;

	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7,
buffer); for (i = 0; i < 4; i++) { if (buffer[i] != nphy->txiqlocal_bestc[i]) { equal = false; break; } } if (!equal) { b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, nphy->txiqlocal_bestc); for (i = 0; i < 4; i++) buffer[i] = 0; b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, buffer); b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, &nphy->txiqlocal_bestc[5]); b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, &nphy->txiqlocal_bestc[5]); } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, struct nphy_txgains target, u8 type, bool debug) { struct b43_phy_n *nphy = dev->phy.n; int i, j, index; u8 rfctl[2]; u8 afectl_core; u16 tmp[6]; u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna; u32 real, imag; enum ieee80211_band band; u8 use; u16 cur_hpf; u16 lna[3] = { 3, 3, 1 }; u16 hpf1[3] = { 7, 2, 0 }; u16 hpf2[3] = { 2, 0, 0 }; u32 power[3] = { }; u16 gain_save[2]; u16 cal_gain[2]; struct nphy_iqcal_params cal_params[2]; struct nphy_iq_est est; int ret = 0; bool playtone = true; int desired = 13; b43_nphy_stay_in_carrier_search(dev, 1); if (dev->phy.rev < 2) b43_nphy_reapply_tx_cal_coeffs(dev); b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save); for (i = 0; i < 2; i++) { b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]); cal_gain[i] = cal_params[i].cal_gain; } b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain); for (i = 0; i < 2; i++) { if (i == 0) { rfctl[0] = B43_NPHY_RFCTL_INTC1; rfctl[1] = B43_NPHY_RFCTL_INTC2; afectl_core = B43_NPHY_AFECTL_C1; } else { rfctl[0] = B43_NPHY_RFCTL_INTC2; rfctl[1] = B43_NPHY_RFCTL_INTC1; afectl_core = B43_NPHY_AFECTL_C2; } tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA); tmp[2] = b43_phy_read(dev, afectl_core); tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); tmp[4] = b43_phy_read(dev, rfctl[0]); tmp[5] = b43_phy_read(dev, rfctl[1]); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, ((1 - i) << 
B43_NPHY_RFSEQCA_RXDIS_SHIFT)); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, (1 - i)); b43_phy_set(dev, afectl_core, 0x0006); b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006); band = b43_current_band(dev->wl); if (nphy->rxcalparams & 0xFF000000) { if (band == IEEE80211_BAND_5GHZ) b43_phy_write(dev, rfctl[0], 0x140); else b43_phy_write(dev, rfctl[0], 0x110); } else { if (band == IEEE80211_BAND_5GHZ) b43_phy_write(dev, rfctl[0], 0x180); else b43_phy_write(dev, rfctl[0], 0x120); } if (band == IEEE80211_BAND_5GHZ) b43_phy_write(dev, rfctl[1], 0x148); else b43_phy_write(dev, rfctl[1], 0x114); if (nphy->rxcalparams & 0x10000) { b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC, (i + 1)); b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC, (2 - i)); } for (j = 0; j < 4; j++) { if (j < 3) { cur_lna = lna[j]; cur_hpf1 = hpf1[j]; cur_hpf2 = hpf2[j]; } else { if (power[1] > 10000) { use = 1; cur_hpf = cur_hpf1; index = 2; } else { if (power[0] > 10000) { use = 1; cur_hpf = cur_hpf1; index = 1; } else { index = 0; use = 2; cur_hpf = cur_hpf2; } } cur_lna = lna[index]; cur_hpf1 = hpf1[index]; cur_hpf2 = hpf2[index]; cur_hpf += desired - hweight32(power[index]); cur_hpf = clamp_val(cur_hpf, 0, 10); if (use == 1) cur_hpf1 = cur_hpf; else cur_hpf2 = cur_hpf; } tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) | (cur_lna << 2)); b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3, false); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); b43_nphy_stop_playback(dev); if (playtone) { ret = b43_nphy_tx_tone(dev, 4000, (nphy->rxcalparams & 0xFFFF), false, false); playtone = false; } else { b43_nphy_run_samples(dev, 160, 0xFFFF, 0, false, false); } if (ret == 0) { if (j < 3) { b43_nphy_rx_iq_est(dev, &est, 1024, 32, false); if (i == 0) { real = est.i0_pwr; imag = est.q0_pwr; } else { real = est.i1_pwr; imag = est.q1_pwr; } power[i] = ((real + imag) / 1024) + 1; } else { b43_nphy_calc_rx_iq_comp(dev, 1 << i); } b43_nphy_stop_playback(dev); } if (ret != 0) break; } 
b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC); b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC); b43_phy_write(dev, rfctl[1], tmp[5]); b43_phy_write(dev, rfctl[0], tmp[4]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]); b43_phy_write(dev, afectl_core, tmp[2]); b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]); if (ret != 0) break; } b43_nphy_rf_control_override(dev, 0x400, 0, 3, true); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save); b43_nphy_stay_in_carrier_search(dev, 0); return ret; } static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev, struct nphy_txgains target, u8 type, bool debug) { return -1; } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev, struct nphy_txgains target, u8 type, bool debug) { if (dev->phy.rev >= 3) return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug); else return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; /* u16 buf[16]; it's rev3+ */ nphy->phyrxchain = mask; if (0 /* FIXME clk */) return; b43_mac_suspend(dev); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, true); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, (mask & 0x3) << B43_NPHY_RFSEQCA_RXEN_SHIFT); if ((mask & 0x3) != 0x3) { b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 1); if (dev->phy.rev >= 3) { /* TODO */ } } else { b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 0x1E); if (dev->phy.rev >= 3) { /* TODO */ } } b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, false); b43_mac_enable(dev); } /************************************************** * N-PHY init **************************************************/ /* * Upload the N-PHY tables. 
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */ static void b43_nphy_tables_init(struct b43_wldev *dev) { if (dev->phy.rev < 3) b43_nphy_rev0_1_2_tables_init(dev); else b43_nphy_rev3plus_tables_init(dev); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) { u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); mimocfg |= B43_NPHY_MIMOCFG_AUTO; if (preamble == 1) mimocfg |= B43_NPHY_MIMOCFG_GFMIX; else mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX; b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ static void b43_nphy_bphy_init(struct b43_wldev *dev) { unsigned int i; u16 val; val = 0x1E1F; for (i = 0; i < 16; i++) { b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); val -= 0x202; } val = 0x3E3F; for (i = 0; i < 16; i++) { b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val); val -= 0x202; } b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) { if (dev->phy.rev >= 3) { if (!init) return; if (0 /* FIXME */) { b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211); b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222); b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144); b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188); } } else { b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, 0xFC00, 0xFC00); break; #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco, 0xFC00, 0xFC00); break; #endif } b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0); b43_maskset16(dev, B43_MMIO_GPIO_MASK, ~0, 0xFC00); b43_maskset16(dev, B43_MMIO_GPIO_CONTROL, (~0xFC00 & 0xFFFF), 0); if (init) { b43_phy_write(dev, 
B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */ int b43_phy_initn(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; u8 tx_pwr_state; struct nphy_txgains target; u16 tmp; enum ieee80211_band tmp2; bool do_rssi_cal; u16 clip[2]; bool do_cal = false; if ((dev->phy.rev >= 3) && (sprom->boardflags_lo & B43_BFL_EXTLNA) && (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) { switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_cc_set32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_CHIPCTL, 0x40); break; #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: chipco_set32(&dev->dev->sdev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40); break; #endif } } nphy->deaf_count = 0; b43_nphy_tables_init(dev); nphy->crsminpwr_adjusted = false; nphy->noisevars_adjusted = false; /* Clear all overrides */ if (dev->phy.rev >= 3) { b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0); b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0); b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0); } else { b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); } b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0); if (dev->phy.rev < 6) { b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0); b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0); } b43_phy_mask(dev, B43_NPHY_RFSEQMODE, ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER)); if (dev->phy.rev >= 3) b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0); if (dev->phy.rev <= 2) { tmp = (dev->phy.rev == 2) ? 
0x3B : 0x40; b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~B43_NPHY_BPHY_CTL3_SCALE, tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT); } b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20); b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20); if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD || (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && dev->dev->board_type == 0x8B)) b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0); else b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8); b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8); b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50); b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30); b43_nphy_update_mimo_config(dev, nphy->preamble_override); b43_nphy_update_txrx_chain(dev); if (phy->rev < 2) { b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8); b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); } tmp2 = b43_current_band(dev->wl); if (b43_nphy_ipa(dev)) { b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1); b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F, nphy->papd_epsilon_offset[0] << 7); b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1); b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F, nphy->papd_epsilon_offset[1] << 7); b43_nphy_int_pa_set_tx_dig_filters(dev); } else if (phy->rev >= 5) { b43_nphy_ext_pa_set_tx_dig_filters(dev); } b43_nphy_workarounds(dev); /* Reset CCA, in init code it differs a little from standard way */ b43_phy_force_clock(dev, 1); tmp = b43_phy_read(dev, B43_NPHY_BBCFG); b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA); b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA); b43_phy_force_clock(dev, 0); b43_mac_phy_clock_set(dev, true); b43_nphy_pa_override(dev, false); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); b43_nphy_pa_override(dev, true); b43_nphy_classifier(dev, 0, 0); b43_nphy_read_clip_detection(dev, clip); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) b43_nphy_bphy_init(dev); tx_pwr_state = nphy->txpwrctrl; 
b43_nphy_tx_power_ctrl(dev, false); b43_nphy_tx_power_fix(dev); b43_nphy_tx_power_ctl_idle_tssi(dev); b43_nphy_tx_power_ctl_setup(dev); b43_nphy_tx_gain_table_upload(dev); if (nphy->phyrxchain != 3) b43_nphy_set_rx_core_state(dev, nphy->phyrxchain); if (nphy->mphase_cal_phase_id > 0) ;/* TODO PHY Periodic Calibration Multi-Phase Restart */ do_rssi_cal = false; if (phy->rev >= 3) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq; else do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq; if (do_rssi_cal) b43_nphy_rssi_cal(dev); else b43_nphy_restore_rssi_cal(dev); } else { b43_nphy_rssi_cal(dev); } if (!((nphy->measure_hold & 0x6) != 0)) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) do_cal = !nphy->iqcal_chanspec_2G.center_freq; else do_cal = !nphy->iqcal_chanspec_5G.center_freq; if (nphy->mute) do_cal = false; if (do_cal) { target = b43_nphy_get_tx_gains(dev); if (nphy->antsel_type == 2) b43_nphy_superswitch_init(dev, true); if (nphy->perical != 2) { b43_nphy_rssi_cal(dev); if (phy->rev >= 3) { nphy->cal_orig_pwr_idx[0] = nphy->txpwrindex[0].index_internal; nphy->cal_orig_pwr_idx[1] = nphy->txpwrindex[1].index_internal; /* TODO N PHY Pre Calibrate TX Gain */ target = b43_nphy_get_tx_gains(dev); } if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0) b43_nphy_save_cal(dev); } else if (nphy->mphase_cal_phase_id == 0) ;/* N PHY Periodic Calibration with arg 3 */ } else { b43_nphy_restore_cal(dev); } } b43_nphy_tx_pwr_ctrl_coef_setup(dev); b43_nphy_tx_power_ctrl(dev, tx_pwr_state); b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015); b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320); if (phy->rev >= 3 && phy->rev <= 6) b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014); b43_nphy_tx_lp_fbw(dev); if (phy->rev >= 3) b43_nphy_spur_workaround(dev); return 0; } /************************************************** * Channel switching ops. 
**************************************************/ static void b43_chantab_phy_upload(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e) { b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3); b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4); b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5); b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); } /* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) { struct bcma_drv_cc __maybe_unused *cc; u32 __maybe_unused pmu_ctl; switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: cc = &dev->dev->bdev->bus->drv_cc; if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { if (avoid) { bcma_chipco_pll_write(cc, 0x0, 0x11500010); bcma_chipco_pll_write(cc, 0x1, 0x000C0C06); bcma_chipco_pll_write(cc, 0x2, 0x0F600a08); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x2001E920); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } else { bcma_chipco_pll_write(cc, 0x0, 0x11100010); bcma_chipco_pll_write(cc, 0x1, 0x000c0c06); bcma_chipco_pll_write(cc, 0x2, 0x03000a08); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x200005c0); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; } else if (dev->dev->chip_id == 0x4716) { if (avoid) { bcma_chipco_pll_write(cc, 0x0, 0x11500060); bcma_chipco_pll_write(cc, 0x1, 0x080C0C06); bcma_chipco_pll_write(cc, 0x2, 0x0F600000); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x2001E924); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } else { bcma_chipco_pll_write(cc, 0x0, 0x11100060); bcma_chipco_pll_write(cc, 0x1, 0x080c0c06); bcma_chipco_pll_write(cc, 0x2, 0x03000000); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x200005c0); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } pmu_ctl = 
BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW; } else if (dev->dev->chip_id == 0x4322 || dev->dev->chip_id == 0x4340 || dev->dev->chip_id == 0x4341) { bcma_chipco_pll_write(cc, 0x0, 0x11100070); bcma_chipco_pll_write(cc, 0x1, 0x1014140a); bcma_chipco_pll_write(cc, 0x5, 0x88888854); if (avoid) bcma_chipco_pll_write(cc, 0x2, 0x05201828); else bcma_chipco_pll_write(cc, 0x2, 0x05001828); pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; } else { return; } bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl); break; #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: /* FIXME */ break; #endif } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ static void b43_nphy_channel_setup(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e, struct ieee80211_channel *new_channel) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = dev->phy.n; int ch = new_channel->hw_value; u16 old_band_5ghz; u32 tmp32; old_band_5ghz = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); } b43_chantab_phy_upload(dev, e); if (new_channel->hw_value == 14) { b43_nphy_classifier(dev, 2, 0); b43_phy_set(dev, B43_PHY_B_TEST, 0x0800); } else { b43_nphy_classifier(dev, 2, 2); if (new_channel->band == IEEE80211_BAND_2GHZ) b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840); } if (!nphy->txpwrctrl) b43_nphy_tx_power_fix(dev); if (dev->phy.rev < 3) 
b43_nphy_adjust_lna_gain_table(dev); b43_nphy_tx_lp_fbw(dev); if (dev->phy.rev >= 3 && dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) { bool avoid = false; if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) { avoid = true; } else if (!b43_channel_type_is_40mhz(phy->channel_type)) { if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14) avoid = true; } else { /* 40MHz */ if (nphy->aband_spurwar_en && (ch == 38 || ch == 102 || ch == 118)) avoid = dev->dev->chip_id == 0x4716; } b43_nphy_pmu_spur_avoid(dev, avoid); if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, avoid ? 0x5341 : 0x8889); b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8); } if (dev->phy.rev == 3 || dev->phy.rev == 4) ; /* TODO: reset PLL */ if (avoid) b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX); else b43_phy_mask(dev, B43_NPHY_BBCFG, ~B43_NPHY_BBCFG_RSTRX & 0xFFFF); b43_nphy_reset_cca(dev); /* wl sets useless phy_isspuravoid here */ } b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830); if (phy->rev >= 3) b43_nphy_spur_workaround(dev); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */ static int b43_nphy_set_channel(struct b43_wldev *dev, struct ieee80211_channel *channel, enum nl80211_channel_type channel_type) { struct b43_phy *phy = &dev->phy; const struct b43_nphy_channeltab_entry_rev2 *tabent_r2 = NULL; const struct b43_nphy_channeltab_entry_rev3 *tabent_r3 = NULL; u8 tmp; if (dev->phy.rev >= 3) { tabent_r3 = b43_nphy_get_chantabent_rev3(dev, channel->center_freq); if (!tabent_r3) return -ESRCH; } else { tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel->hw_value); if (!tabent_r2) return -ESRCH; } /* Channel is set later in common code, but we need to set it on our own to let this function's subcalls work properly. 
*/ phy->channel = channel->hw_value; phy->channel_freq = channel->center_freq; if (b43_channel_type_is_40mhz(phy->channel_type) != b43_channel_type_is_40mhz(channel_type)) ; /* TODO: BMAC BW Set (channel_type) */ if (channel_type == NL80211_CHAN_HT40PLUS) b43_phy_set(dev, B43_NPHY_RXCTL, B43_NPHY_RXCTL_BSELU20); else if (channel_type == NL80211_CHAN_HT40MINUS) b43_phy_mask(dev, B43_NPHY_RXCTL, ~B43_NPHY_RXCTL_BSELU20); if (dev->phy.rev >= 3) { tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0; b43_radio_maskset(dev, 0x08, 0xFFFB, tmp); b43_radio_2056_setup(dev, tabent_r3); b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel); } else { tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050; b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp); b43_radio_2055_setup(dev, tabent_r2); b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel); } return 0; } /************************************************** * Basic PHY ops. **************************************************/ static int b43_nphy_op_allocate(struct b43_wldev *dev) { struct b43_phy_n *nphy; nphy = kzalloc(sizeof(*nphy), GFP_KERNEL); if (!nphy) return -ENOMEM; dev->phy.n = nphy; return 0; } static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; struct ssb_sprom *sprom = dev->dev->bus_sprom; memset(nphy, 0, sizeof(*nphy)); nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); nphy->spur_avoid = (phy->rev >= 3) ? B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; nphy->gain_boost = true; /* this way we follow wl, assume it is true */ nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ nphy->perical = 2; /* avoid additional rssi cal on init (like wl) */ /* 128 can mean disabled-by-default state of TX pwr ctl. Max value is * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. 
*/ nphy->tx_pwr_idx[0] = 128; nphy->tx_pwr_idx[1] = 128; /* Hardware TX power control and 5GHz power gain */ nphy->txpwrctrl = false; nphy->pwg_gain_5ghz = false; if (dev->phy.rev >= 3 || (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) { nphy->txpwrctrl = true; nphy->pwg_gain_5ghz = true; } else if (sprom->revision >= 4) { if (dev->phy.rev >= 2 && (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) { nphy->txpwrctrl = true; #ifdef CONFIG_B43_SSB if (dev->dev->bus_type == B43_BUS_SSB && dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) { struct pci_dev *pdev = dev->dev->sdev->bus->host_pci; if (pdev->device == 0x4328 || pdev->device == 0x432a) nphy->pwg_gain_5ghz = true; } #endif } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) { nphy->pwg_gain_5ghz = true; } } if (dev->phy.rev >= 3) { nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; } } static void b43_nphy_op_free(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; kfree(nphy); phy->n = NULL; } static int b43_nphy_op_init(struct b43_wldev *dev) { return b43_phy_initn(dev); } static inline void check_phyreg(struct b43_wldev *dev, u16 offset) { #if B43_DEBUG if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) { /* OFDM registers are onnly available on A/G-PHYs */ b43err(dev->wl, "Invalid OFDM PHY access at " "0x%04X on N-PHY\n", offset); dump_stack(); } if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { /* Ext-G registers are only available on G-PHYs */ b43err(dev->wl, "Invalid EXT-G PHY access at " "0x%04X on N-PHY\n", offset); dump_stack(); } #endif /* B43_DEBUG */ } static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg) { check_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { check_phyreg(dev, reg); b43_write16(dev, 
B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { check_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); b43_maskset16(dev, B43_MMIO_PHY_DATA, mask, set); } static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) { /* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); /* N-PHY needs 0x100 for read access */ reg |= 0x100; b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { /* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } /* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, bool blocked) { if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED) b43err(dev->wl, "MAC not suspended\n"); if (blocked) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU); if (dev->phy.rev >= 3) { b43_radio_mask(dev, 0x09, ~0x2); b43_radio_write(dev, 0x204D, 0); b43_radio_write(dev, 0x2053, 0); b43_radio_write(dev, 0x2058, 0); b43_radio_write(dev, 0x205E, 0); b43_radio_mask(dev, 0x2062, ~0xF0); b43_radio_write(dev, 0x2064, 0); b43_radio_write(dev, 0x304D, 0); b43_radio_write(dev, 0x3053, 0); b43_radio_write(dev, 0x3058, 0); b43_radio_write(dev, 0x305E, 0); b43_radio_mask(dev, 0x3062, ~0xF0); b43_radio_write(dev, 0x3064, 0); } } else { if (dev->phy.rev >= 3) { b43_radio_init2056(dev); b43_switch_channel(dev, dev->phy.channel); } else { b43_radio_init2055(dev); } } } /* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) { u16 override = on ? 0x0 : 0x7FFF; u16 core = on ? 
0xD : 0x00FD; if (dev->phy.rev >= 3) { if (on) { b43_phy_write(dev, B43_NPHY_AFECTL_C1, core); b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override); b43_phy_write(dev, B43_NPHY_AFECTL_C2, core); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override); } else { b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override); b43_phy_write(dev, B43_NPHY_AFECTL_C1, core); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override); b43_phy_write(dev, B43_NPHY_AFECTL_C2, core); } } else { b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override); } } static int b43_nphy_op_switch_channel(struct b43_wldev *dev, unsigned int new_channel) { struct ieee80211_channel *channel = dev->wl->hw->conf.channel; enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type; if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { if ((new_channel < 1) || (new_channel > 14)) return -EINVAL; } else { if (new_channel > 200) return -EINVAL; } return b43_nphy_set_channel(dev, channel, channel_type); } static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) return 1; return 36; } const struct b43_phy_operations b43_phyops_n = { .allocate = b43_nphy_op_allocate, .free = b43_nphy_op_free, .prepare_structs = b43_nphy_op_prepare_structs, .init = b43_nphy_op_init, .phy_read = b43_nphy_op_read, .phy_write = b43_nphy_op_write, .phy_maskset = b43_nphy_op_maskset, .radio_read = b43_nphy_op_radio_read, .radio_write = b43_nphy_op_radio_write, .software_rfkill = b43_nphy_op_software_rfkill, .switch_analog = b43_nphy_op_switch_analog, .switch_channel = b43_nphy_op_switch_channel, .get_default_chan = b43_nphy_op_get_default_chan, .recalc_txpower = b43_nphy_op_recalc_txpower, .adjust_txpower = b43_nphy_op_adjust_txpower, };
gpl-2.0
PRJosh/kernel_msm
arch/powerpc/kernel/ptrace32.c
4403
9356
/* * ptrace for 32-bit processes running on a 64-bit kernel. * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@hq.fsmlabs.com) * and Paul Mackerras (paulus@samba.org). * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/compat.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/switch_to.h> /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, * we mark them as obsolete now, they will be removed in a future version */ static long compat_ptrace_old(struct task_struct *child, long request, long addr, long data) { switch (request) { case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ return copy_regset_to_user(child, task_user_regset_view(current), 0, 0, 32 * sizeof(compat_long_t), compat_ptr(data)); case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. 
*/ return copy_regset_from_user(child, task_user_regset_view(current), 0, 0, 32 * sizeof(compat_long_t), compat_ptr(data)); } return -EPERM; } /* Macros to workout the correct index for the FPR in the thread struct */ #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) #define FPRHALF(i) (((i) - PT_FPR0) & 1) #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) #define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; int ret; switch (request) { /* * Read 4 bytes of the other process' storage * data is a pointer specifying where the user wants the * 4 bytes copied into * addr is a pointer in the user's storage that contains an 8 byte * address in the other process of the 4 bytes that is to be read * (this is run in a 32-bit process looking at a 64-bit process) * when I and D space are separate, these will need to be fixed. */ case PPC_PTRACE_PEEKTEXT_3264: case PPC_PTRACE_PEEKDATA_3264: { u32 tmp; int copied; u32 __user * addrOthers; ret = -EIO; /* Get the addr in the other process that we want to read */ if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) break; copied = access_process_vm(child, (u64)addrOthers, &tmp, sizeof(tmp), 0); if (copied != sizeof(tmp)) break; ret = put_user(tmp, (u32 __user *)data); break; } /* Read a register (specified by ADDR) out of the "user area" */ case PTRACE_PEEKUSR: { int index; unsigned long tmp; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR32)) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { tmp = ptrace_get_reg(child, index); } else { flush_fp_to_thread(child); /* * the user space code considers the floating point * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. 
*/ tmp = ((unsigned int *)child->thread.fpr) [FPRINDEX(index)]; } ret = put_user((unsigned int)tmp, (u32 __user *)data); break; } /* * Read 4 bytes out of the other process' pt_regs area * data is a pointer specifying where the user wants the * 4 bytes copied into * addr is the offset into the other process' pt_regs structure * that is to be read * (this is run in a 32-bit process looking at a 64-bit process) */ case PPC_PTRACE_PEEKUSR_3264: { u32 index; u32 reg32bits; u64 tmp; u32 numReg; u32 part; ret = -EIO; /* Determine which register the user wants */ index = (u64)addr >> 2; numReg = index / 2; /* Determine which part of the register the user wants */ if (index % 2) part = 1; /* want the 2nd half of the register (right-most). */ else part = 0; /* want the 1st half of the register (left-most). */ /* Validate the input - check to see if address is on the wrong boundary * or beyond the end of the user area */ if ((addr & 3) || numReg > PT_FPSCR) break; CHECK_FULL_REGS(child->thread.regs); if (numReg >= PT_FPR0) { flush_fp_to_thread(child); /* get 64 bit FPR */ tmp = ((u64 *)child->thread.fpr) [FPRINDEX_3264(numReg)]; } else { /* register within PT_REGS struct */ tmp = ptrace_get_reg(child, numReg); } reg32bits = ((u32*)&tmp)[part]; ret = put_user(reg32bits, (u32 __user *)data); break; } /* * Write 4 bytes into the other process' storage * data is the 4 bytes that the user wants written * addr is a pointer in the user's storage that contains an * 8 byte address in the other process where the 4 bytes * that is to be written * (this is run in a 32-bit process looking at a 64-bit process) * when I and D space are separate, these will need to be fixed. 
*/ case PPC_PTRACE_POKETEXT_3264: case PPC_PTRACE_POKEDATA_3264: { u32 tmp = data; u32 __user * addrOthers; /* Get the addr in the other process that we want to write into */ ret = -EIO; if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) break; ret = 0; if (access_process_vm(child, (u64)addrOthers, &tmp, sizeof(tmp), 1) == sizeof(tmp)) break; ret = -EIO; break; } /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR32)) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { flush_fp_to_thread(child); /* * the user space code considers the floating point * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ ((unsigned int *)child->thread.fpr) [FPRINDEX(index)] = data; ret = 0; } break; } /* * Write 4 bytes into the other process' pt_regs area * data is the 4 bytes that the user wants written * addr is the offset into the other process' pt_regs structure * that is to be written into * (this is run in a 32-bit process looking at a 64-bit process) */ case PPC_PTRACE_POKEUSR_3264: { u32 index; u32 numReg; ret = -EIO; /* Determine which register the user wants */ index = (u64)addr >> 2; numReg = index / 2; /* * Validate the input - check to see if address is on the * wrong boundary or beyond the end of the user area */ if ((addr & 3) || (numReg > PT_FPSCR)) break; CHECK_FULL_REGS(child->thread.regs); if (numReg < PT_FPR0) { unsigned long freg = ptrace_get_reg(child, numReg); if (index % 2) freg = (freg & ~0xfffffffful) | (data & 0xfffffffful); else freg = (freg & 0xfffffffful) | (data << 32); ret = ptrace_put_reg(child, numReg, freg); } else { u64 *tmp; flush_fp_to_thread(child); /* get 64 bit FPR ... */ tmp = &(((u64 *)child->thread.fpr) [FPRINDEX_3264(numReg)]); /* ... 
write the 32 bit part we want */ ((u32 *)tmp)[index % 2] = data; ret = 0; } break; } case PTRACE_GET_DEBUGREG: { ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) break; #ifdef CONFIG_PPC_ADV_DEBUG_REGS ret = put_user(child->thread.dac1, (u32 __user *)data); #else ret = put_user(child->thread.dabr, (u32 __user *)data); #endif break; } case PTRACE_GETREGS: /* Get all pt_regs from the child. */ return copy_regset_to_user( child, task_user_regset_view(current), 0, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user( child, task_user_regset_view(current), 0, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); case PTRACE_GETFPREGS: case PTRACE_SETFPREGS: case PTRACE_GETVRREGS: case PTRACE_SETVRREGS: case PTRACE_GETVSRREGS: case PTRACE_SETVSRREGS: case PTRACE_GETREGS64: case PTRACE_SETREGS64: case PPC_PTRACE_GETFPREGS: case PPC_PTRACE_SETFPREGS: case PTRACE_KILL: case PTRACE_SINGLESTEP: case PTRACE_DETACH: case PTRACE_SET_DEBUGREG: case PTRACE_SYSCALL: case PTRACE_CONT: case PPC_PTRACE_GETHWDBGINFO: case PPC_PTRACE_SETHWDEBUG: case PPC_PTRACE_DELHWDEBUG: ret = arch_ptrace(child, request, addr, data); break; /* Old reverse args ptrace callss */ case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ ret = compat_ptrace_old(child, request, addr, data); break; default: ret = compat_ptrace_request(child, request, addr, data); break; } return ret; }
gpl-2.0