repo_name
string
path
string
copies
string
size
string
content
string
license
string
myself659/linux
kernel/async.c
1268
10140
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * Goals and Theory of Operation
 *
 * The primary goal of this feature is to reduce the kernel boot time, by
 * doing various independent hardware delays and discovery operations
 * decoupled and not strictly serialized.
 *
 * The key concept is the "sequence cookie": a monotonically incrementing
 * number (behind an abstracted type) that the async core assigns to each
 * scheduled call and passes to the called function.  Before doing a
 * globally visible operation (e.g. registering device numbers), an
 * asynchronously called function calls async_synchronize_cookie() with
 * its own cookie; that waits until every call scheduled before it has
 * completed.  Externally visible effects therefore still happen
 * sequentially and in order — much like an out-of-order CPU retiring
 * its instructions in order.
 *
 * Subsystem/driver init code that scheduled asynchronous probe functions
 * but shares global resources with drivers/subsystems that do not use
 * this feature must do a full synchronization with
 * async_synchronize_full() before returning from its init function, to
 * maintain strict ordering between the asynchronous and synchronous
 * parts of the kernel.
 */

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * Return the cookie of the oldest entry still pending in @domain (or,
 * for a NULL @domain, across all registered domains).  ASYNC_COOKIE_MAX
 * means nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct list_head *pending;
	async_cookie_t oldest = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	pending = domain ? &domain->pending : &async_global_pending;

	if (!list_empty(pending))
		oldest = list_first_entry(pending, struct async_entry,
					  domain_list)->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return oldest;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		pr_debug("calling  %lli_%pF @ %i\n",
			 (long long)entry->cookie,
			 entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
			 (long long)entry->cookie,
			 entry->func,
			 (long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_t func, void *data,
				       struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_t func, void *data)
{
	return __async_schedule(func, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.  A
 * synchronization domain is specified via @domain.  Note: This function
 * may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_t func, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(func, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);
		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
gpl-2.0
charles1018/The-f2fs-filesystem
tools/perf/arch/x86/util/unwind-libunwind.c
1524
1941
#include <errno.h> #include <libunwind.h> #include "perf_regs.h" #include "../../util/unwind.h" #include "../../util/debug.h" #ifdef HAVE_ARCH_X86_64_SUPPORT int libunwind__arch_reg_id(int regnum) { int id; switch (regnum) { case UNW_X86_64_RAX: id = PERF_REG_X86_AX; break; case UNW_X86_64_RDX: id = PERF_REG_X86_DX; break; case UNW_X86_64_RCX: id = PERF_REG_X86_CX; break; case UNW_X86_64_RBX: id = PERF_REG_X86_BX; break; case UNW_X86_64_RSI: id = PERF_REG_X86_SI; break; case UNW_X86_64_RDI: id = PERF_REG_X86_DI; break; case UNW_X86_64_RBP: id = PERF_REG_X86_BP; break; case UNW_X86_64_RSP: id = PERF_REG_X86_SP; break; case UNW_X86_64_R8: id = PERF_REG_X86_R8; break; case UNW_X86_64_R9: id = PERF_REG_X86_R9; break; case UNW_X86_64_R10: id = PERF_REG_X86_R10; break; case UNW_X86_64_R11: id = PERF_REG_X86_R11; break; case UNW_X86_64_R12: id = PERF_REG_X86_R12; break; case UNW_X86_64_R13: id = PERF_REG_X86_R13; break; case UNW_X86_64_R14: id = PERF_REG_X86_R14; break; case UNW_X86_64_R15: id = PERF_REG_X86_R15; break; case UNW_X86_64_RIP: id = PERF_REG_X86_IP; break; default: pr_err("unwind: invalid reg id %d\n", regnum); return -EINVAL; } return id; } #else int libunwind__arch_reg_id(int regnum) { int id; switch (regnum) { case UNW_X86_EAX: id = PERF_REG_X86_AX; break; case UNW_X86_EDX: id = PERF_REG_X86_DX; break; case UNW_X86_ECX: id = PERF_REG_X86_CX; break; case UNW_X86_EBX: id = PERF_REG_X86_BX; break; case UNW_X86_ESI: id = PERF_REG_X86_SI; break; case UNW_X86_EDI: id = PERF_REG_X86_DI; break; case UNW_X86_EBP: id = PERF_REG_X86_BP; break; case UNW_X86_ESP: id = PERF_REG_X86_SP; break; case UNW_X86_EIP: id = PERF_REG_X86_IP; break; default: pr_err("unwind: invalid reg id %d\n", regnum); return -EINVAL; } return id; } #endif /* HAVE_ARCH_X86_64_SUPPORT */
gpl-2.0
hellsgod/hells-Core-N6P
kernel/delayacct.c
2292
5248
/* delayacct.c - per-task delay accounting * * Copyright (C) Shailabh Nagar, IBM Corp. 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/taskstats.h> #include <linux/time.h> #include <linux/sysctl.h> #include <linux/delayacct.h> #include <linux/module.h> int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */ EXPORT_SYMBOL_GPL(delayacct_on); struct kmem_cache *delayacct_cache; static int __init delayacct_setup_disable(char *str) { delayacct_on = 0; return 1; } __setup("nodelayacct", delayacct_setup_disable); void delayacct_init(void) { delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC); delayacct_tsk_init(&init_task); } void __delayacct_tsk_init(struct task_struct *tsk) { tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); if (tsk->delays) spin_lock_init(&tsk->delays->lock); } /* * Start accounting for a delay statistic using * its starting timestamp (@start) */ static inline void delayacct_start(struct timespec *start) { do_posix_clock_monotonic_gettime(start); } /* * Finish delay accounting for a statistic using * its timestamps (@start, @end), accumalator (@total) and @count */ static void delayacct_end(struct timespec *start, struct timespec *end, u64 *total, u32 *count) { struct timespec ts; s64 ns; unsigned long flags; do_posix_clock_monotonic_gettime(end); ts = timespec_sub(*end, *start); ns = timespec_to_ns(&ts); if (ns < 0) return; spin_lock_irqsave(&current->delays->lock, flags); *total += ns; (*count)++; 
spin_unlock_irqrestore(&current->delays->lock, flags); } void __delayacct_blkio_start(void) { delayacct_start(&current->delays->blkio_start); } void __delayacct_blkio_end(void) { if (current->delays->flags & DELAYACCT_PF_SWAPIN) /* Swapin block I/O */ delayacct_end(&current->delays->blkio_start, &current->delays->blkio_end, &current->delays->swapin_delay, &current->delays->swapin_count); else /* Other block I/O */ delayacct_end(&current->delays->blkio_start, &current->delays->blkio_end, &current->delays->blkio_delay, &current->delays->blkio_count); } int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { s64 tmp; unsigned long t1; unsigned long long t2, t3; unsigned long flags; struct timespec ts; cputime_t utime, stime, stimescaled, utimescaled; /* Though tsk->delays accessed later, early exit avoids * unnecessary returning of other data */ if (!tsk->delays) goto done; tmp = (s64)d->cpu_run_real_total; task_cputime(tsk, &utime, &stime); cputime_to_timespec(utime + stime, &ts); tmp += timespec_to_ns(&ts); d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; tmp = (s64)d->cpu_scaled_run_real_total; task_cputime_scaled(tsk, &utimescaled, &stimescaled); cputime_to_timespec(utimescaled + stimescaled, &ts); tmp += timespec_to_ns(&ts); d->cpu_scaled_run_real_total = (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; /* * No locking available for sched_info (and too expensive to add one) * Mitigate by taking snapshot of values */ t1 = tsk->sched_info.pcount; t2 = tsk->sched_info.run_delay; t3 = tsk->se.sum_exec_runtime; d->cpu_count += t1; tmp = (s64)d->cpu_delay_total + t2; d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; tmp = (s64)d->cpu_run_virtual_total + t3; d->cpu_run_virtual_total = (tmp < (s64)d->cpu_run_virtual_total) ? 
0 : tmp; /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ spin_lock_irqsave(&tsk->delays->lock, flags); tmp = d->blkio_delay_total + tsk->delays->blkio_delay; d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; tmp = d->swapin_delay_total + tsk->delays->swapin_delay; d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; tmp = d->freepages_delay_total + tsk->delays->freepages_delay; d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp; d->blkio_count += tsk->delays->blkio_count; d->swapin_count += tsk->delays->swapin_count; d->freepages_count += tsk->delays->freepages_count; spin_unlock_irqrestore(&tsk->delays->lock, flags); done: return 0; } __u64 __delayacct_blkio_ticks(struct task_struct *tsk) { __u64 ret; unsigned long flags; spin_lock_irqsave(&tsk->delays->lock, flags); ret = nsec_to_clock_t(tsk->delays->blkio_delay + tsk->delays->swapin_delay); spin_unlock_irqrestore(&tsk->delays->lock, flags); return ret; } void __delayacct_freepages_start(void) { delayacct_start(&current->delays->freepages_start); } void __delayacct_freepages_end(void) { delayacct_end(&current->delays->freepages_start, &current->delays->freepages_end, &current->delays->freepages_delay, &current->delays->freepages_count); }
gpl-2.0
nian0114/android_kernel_zte_n918st
fs/fscache/stats.c
3572
10245
/* FS-Cache statistics * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL THREAD #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" /* * operation counters */ atomic_t fscache_n_op_pend; atomic_t fscache_n_op_run; atomic_t fscache_n_op_enqueue; atomic_t fscache_n_op_requeue; atomic_t fscache_n_op_deferred_release; atomic_t fscache_n_op_release; atomic_t fscache_n_op_gc; atomic_t fscache_n_op_cancelled; atomic_t fscache_n_op_rejected; atomic_t fscache_n_attr_changed; atomic_t fscache_n_attr_changed_ok; atomic_t fscache_n_attr_changed_nobufs; atomic_t fscache_n_attr_changed_nomem; atomic_t fscache_n_attr_changed_calls; atomic_t fscache_n_allocs; atomic_t fscache_n_allocs_ok; atomic_t fscache_n_allocs_wait; atomic_t fscache_n_allocs_nobufs; atomic_t fscache_n_allocs_intr; atomic_t fscache_n_allocs_object_dead; atomic_t fscache_n_alloc_ops; atomic_t fscache_n_alloc_op_waits; atomic_t fscache_n_retrievals; atomic_t fscache_n_retrievals_ok; atomic_t fscache_n_retrievals_wait; atomic_t fscache_n_retrievals_nodata; atomic_t fscache_n_retrievals_nobufs; atomic_t fscache_n_retrievals_intr; atomic_t fscache_n_retrievals_nomem; atomic_t fscache_n_retrievals_object_dead; atomic_t fscache_n_retrieval_ops; atomic_t fscache_n_retrieval_op_waits; atomic_t fscache_n_stores; atomic_t fscache_n_stores_ok; atomic_t fscache_n_stores_again; atomic_t fscache_n_stores_nobufs; atomic_t fscache_n_stores_oom; atomic_t fscache_n_store_ops; atomic_t fscache_n_store_calls; atomic_t fscache_n_store_pages; atomic_t fscache_n_store_radix_deletes; atomic_t fscache_n_store_pages_over_limit; atomic_t 
fscache_n_store_vmscan_not_storing; atomic_t fscache_n_store_vmscan_gone; atomic_t fscache_n_store_vmscan_busy; atomic_t fscache_n_store_vmscan_cancelled; atomic_t fscache_n_store_vmscan_wait; atomic_t fscache_n_marks; atomic_t fscache_n_uncaches; atomic_t fscache_n_acquires; atomic_t fscache_n_acquires_null; atomic_t fscache_n_acquires_no_cache; atomic_t fscache_n_acquires_ok; atomic_t fscache_n_acquires_nobufs; atomic_t fscache_n_acquires_oom; atomic_t fscache_n_invalidates; atomic_t fscache_n_invalidates_run; atomic_t fscache_n_updates; atomic_t fscache_n_updates_null; atomic_t fscache_n_updates_run; atomic_t fscache_n_relinquishes; atomic_t fscache_n_relinquishes_null; atomic_t fscache_n_relinquishes_waitcrt; atomic_t fscache_n_relinquishes_retire; atomic_t fscache_n_cookie_index; atomic_t fscache_n_cookie_data; atomic_t fscache_n_cookie_special; atomic_t fscache_n_object_alloc; atomic_t fscache_n_object_no_alloc; atomic_t fscache_n_object_lookups; atomic_t fscache_n_object_lookups_negative; atomic_t fscache_n_object_lookups_positive; atomic_t fscache_n_object_lookups_timed_out; atomic_t fscache_n_object_created; atomic_t fscache_n_object_avail; atomic_t fscache_n_object_dead; atomic_t fscache_n_checkaux_none; atomic_t fscache_n_checkaux_okay; atomic_t fscache_n_checkaux_update; atomic_t fscache_n_checkaux_obsolete; atomic_t fscache_n_cop_alloc_object; atomic_t fscache_n_cop_lookup_object; atomic_t fscache_n_cop_lookup_complete; atomic_t fscache_n_cop_grab_object; atomic_t fscache_n_cop_invalidate_object; atomic_t fscache_n_cop_update_object; atomic_t fscache_n_cop_drop_object; atomic_t fscache_n_cop_put_object; atomic_t fscache_n_cop_sync_cache; atomic_t fscache_n_cop_attr_changed; atomic_t fscache_n_cop_read_or_alloc_page; atomic_t fscache_n_cop_read_or_alloc_pages; atomic_t fscache_n_cop_allocate_page; atomic_t fscache_n_cop_allocate_pages; atomic_t fscache_n_cop_write_page; atomic_t fscache_n_cop_uncache_page; atomic_t fscache_n_cop_dissociate_pages; /* * 
display the general statistics */ static int fscache_stats_show(struct seq_file *m, void *v) { seq_puts(m, "FS-Cache statistics\n"); seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", atomic_read(&fscache_n_cookie_index), atomic_read(&fscache_n_cookie_data), atomic_read(&fscache_n_cookie_special)); seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", atomic_read(&fscache_n_object_alloc), atomic_read(&fscache_n_object_no_alloc), atomic_read(&fscache_n_object_avail), atomic_read(&fscache_n_object_dead)); seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", atomic_read(&fscache_n_checkaux_none), atomic_read(&fscache_n_checkaux_okay), atomic_read(&fscache_n_checkaux_update), atomic_read(&fscache_n_checkaux_obsolete)); seq_printf(m, "Pages : mrk=%u unc=%u\n", atomic_read(&fscache_n_marks), atomic_read(&fscache_n_uncaches)); seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" " oom=%u\n", atomic_read(&fscache_n_acquires), atomic_read(&fscache_n_acquires_null), atomic_read(&fscache_n_acquires_no_cache), atomic_read(&fscache_n_acquires_ok), atomic_read(&fscache_n_acquires_nobufs), atomic_read(&fscache_n_acquires_oom)); seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", atomic_read(&fscache_n_object_lookups), atomic_read(&fscache_n_object_lookups_negative), atomic_read(&fscache_n_object_lookups_positive), atomic_read(&fscache_n_object_created), atomic_read(&fscache_n_object_lookups_timed_out)); seq_printf(m, "Invals : n=%u run=%u\n", atomic_read(&fscache_n_invalidates), atomic_read(&fscache_n_invalidates_run)); seq_printf(m, "Updates: n=%u nul=%u run=%u\n", atomic_read(&fscache_n_updates), atomic_read(&fscache_n_updates_null), atomic_read(&fscache_n_updates_run)); seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", atomic_read(&fscache_n_relinquishes), atomic_read(&fscache_n_relinquishes_null), atomic_read(&fscache_n_relinquishes_waitcrt), atomic_read(&fscache_n_relinquishes_retire)); seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", 
atomic_read(&fscache_n_attr_changed), atomic_read(&fscache_n_attr_changed_ok), atomic_read(&fscache_n_attr_changed_nobufs), atomic_read(&fscache_n_attr_changed_nomem), atomic_read(&fscache_n_attr_changed_calls)); seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", atomic_read(&fscache_n_allocs), atomic_read(&fscache_n_allocs_ok), atomic_read(&fscache_n_allocs_wait), atomic_read(&fscache_n_allocs_nobufs), atomic_read(&fscache_n_allocs_intr)); seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", atomic_read(&fscache_n_alloc_ops), atomic_read(&fscache_n_alloc_op_waits), atomic_read(&fscache_n_allocs_object_dead)); seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" " int=%u oom=%u\n", atomic_read(&fscache_n_retrievals), atomic_read(&fscache_n_retrievals_ok), atomic_read(&fscache_n_retrievals_wait), atomic_read(&fscache_n_retrievals_nodata), atomic_read(&fscache_n_retrievals_nobufs), atomic_read(&fscache_n_retrievals_intr), atomic_read(&fscache_n_retrievals_nomem)); seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", atomic_read(&fscache_n_retrieval_ops), atomic_read(&fscache_n_retrieval_op_waits), atomic_read(&fscache_n_retrievals_object_dead)); seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", atomic_read(&fscache_n_stores), atomic_read(&fscache_n_stores_ok), atomic_read(&fscache_n_stores_again), atomic_read(&fscache_n_stores_nobufs), atomic_read(&fscache_n_stores_oom)); seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", atomic_read(&fscache_n_store_ops), atomic_read(&fscache_n_store_calls), atomic_read(&fscache_n_store_pages), atomic_read(&fscache_n_store_radix_deletes), atomic_read(&fscache_n_store_pages_over_limit)); seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n", atomic_read(&fscache_n_store_vmscan_not_storing), atomic_read(&fscache_n_store_vmscan_gone), atomic_read(&fscache_n_store_vmscan_busy), atomic_read(&fscache_n_store_vmscan_cancelled), atomic_read(&fscache_n_store_vmscan_wait)); seq_printf(m, "Ops : pend=%u run=%u 
enq=%u can=%u rej=%u\n", atomic_read(&fscache_n_op_pend), atomic_read(&fscache_n_op_run), atomic_read(&fscache_n_op_enqueue), atomic_read(&fscache_n_op_cancelled), atomic_read(&fscache_n_op_rejected)); seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", atomic_read(&fscache_n_op_deferred_release), atomic_read(&fscache_n_op_release), atomic_read(&fscache_n_op_gc)); seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", atomic_read(&fscache_n_cop_alloc_object), atomic_read(&fscache_n_cop_lookup_object), atomic_read(&fscache_n_cop_lookup_complete), atomic_read(&fscache_n_cop_grab_object)); seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n", atomic_read(&fscache_n_cop_invalidate_object), atomic_read(&fscache_n_cop_update_object), atomic_read(&fscache_n_cop_drop_object), atomic_read(&fscache_n_cop_put_object), atomic_read(&fscache_n_cop_attr_changed), atomic_read(&fscache_n_cop_sync_cache)); seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n", atomic_read(&fscache_n_cop_read_or_alloc_page), atomic_read(&fscache_n_cop_read_or_alloc_pages), atomic_read(&fscache_n_cop_allocate_page), atomic_read(&fscache_n_cop_allocate_pages), atomic_read(&fscache_n_cop_write_page), atomic_read(&fscache_n_cop_uncache_page), atomic_read(&fscache_n_cop_dissociate_pages)); return 0; } /* * open "/proc/fs/fscache/stats" allowing provision of a statistical summary */ static int fscache_stats_open(struct inode *inode, struct file *file) { return single_open(file, fscache_stats_show, NULL); } const struct file_operations fscache_stats_fops = { .owner = THIS_MODULE, .open = fscache_stats_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, };
gpl-2.0
falinux/terra-kernel
arch/mips/kernel/mips_ksyms.c
3828
1791
/*
 * Export MIPS-specific functions needed for loadable modules.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>

/* Assembly helpers implemented in arch/mips/lib; no C prototypes exist. */
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_user_nocheck_asm(char *__to,
					    const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
				    long __len);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);

/*
 * String functions
 */
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);

EXPORT_SYMBOL(kernel_thread);

/*
 * Userspace access stuff.
 */
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__copy_user_inatomic);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL(__strnlen_user_asm);

/* Checksumming helpers. */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_user);

EXPORT_SYMBOL(invalid_pte_table);

#ifdef CONFIG_FUNCTION_TRACER
/* _mcount is defined in arch/mips/kernel/mcount.S */
EXPORT_SYMBOL(_mcount);
#endif
gpl-2.0
jfdsmabalot/kernel_linux-3.4.y
arch/alpha/kernel/binfmt_loader.c
4596
1120
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm_types.h>
#include <linux/binfmts.h>
#include <linux/a.out.h>

/*
 * Recognise old-format ECOFF binaries and re-run them through
 * /sbin/loader, which knows how to map them.
 */
static int load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct exec *eh = (struct exec *)bprm->buf;
	unsigned long loader;
	struct file *file;
	int retval;

	/* Only ECOFF images with the expected flags are ours. */
	if (eh->fh.f_magic != 0x183 || (eh->fh.f_flags & 0x3000) != 0x3000)
		return -ENOEXEC;

	/* Refuse recursion: a loader must not itself need a loader. */
	if (bprm->loader)
		return -ENOEXEC;

	allow_write_access(bprm->file);
	fput(bprm->file);
	bprm->file = NULL;

	loader = bprm->vma->vm_end - sizeof(void *);

	file = open_exec("/sbin/loader");
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	/* Remember if the application is TASO.  */
	bprm->taso = eh->ah.entry < 0x100000000UL;

	bprm->file = file;
	bprm->loader = loader;
	retval = prepare_binprm(bprm);
	if (retval < 0)
		return retval;

	/* Hand the loader image off to the normal binfmt machinery. */
	return search_binary_handler(bprm, regs);
}

static struct linux_binfmt loader_format = {
	.load_binary	= load_binary,
};

static int __init init_loader_binfmt(void)
{
	insert_binfmt(&loader_format);
	return 0;
}
arch_initcall(init_loader_binfmt);
gpl-2.0
Split-Screen/android_kernel_motorola_otus
drivers/acpi/acpica/nsutils.c
4852
21068
/****************************************************************************** * * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "amlcode.h" #include "actables.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsutils") /* Local prototypes */ static u8 acpi_ns_valid_path_separator(char sep); #ifdef ACPI_OBSOLETE_FUNCTIONS acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search); #endif /******************************************************************************* * * FUNCTION: acpi_ns_print_node_pathname * * PARAMETERS: Node - Object * Message - Prefix message * * DESCRIPTION: Print an object's full namespace pathname * Manages allocation/freeing of a pathname buffer * ******************************************************************************/ void acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *message) { struct acpi_buffer buffer; acpi_status status; if (!node) { acpi_os_printf("[NULL NAME]"); return; } /* Convert handle to full pathname and print it (with supplied message) */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; status = acpi_ns_handle_to_pathname(node, &buffer); if (ACPI_SUCCESS(status)) { if (message) { acpi_os_printf("%s ", message); } acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node); ACPI_FREE(buffer.pointer); } } /******************************************************************************* * * FUNCTION: acpi_ns_valid_root_prefix * * PARAMETERS: Prefix - Character to be checked * * RETURN: TRUE if a valid prefix * * DESCRIPTION: Check if 
a character is a valid ACPI Root prefix * ******************************************************************************/ u8 acpi_ns_valid_root_prefix(char prefix) { return ((u8) (prefix == '\\')); } /******************************************************************************* * * FUNCTION: acpi_ns_valid_path_separator * * PARAMETERS: Sep - Character to be checked * * RETURN: TRUE if a valid path separator * * DESCRIPTION: Check if a character is a valid ACPI path separator * ******************************************************************************/ static u8 acpi_ns_valid_path_separator(char sep) { return ((u8) (sep == '.')); } /******************************************************************************* * * FUNCTION: acpi_ns_get_type * * PARAMETERS: Node - Parent Node to be examined * * RETURN: Type field from Node whose handle is passed * * DESCRIPTION: Return the type of a Namespace node * ******************************************************************************/ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node) { ACPI_FUNCTION_TRACE(ns_get_type); if (!node) { ACPI_WARNING((AE_INFO, "Null Node parameter")); return_UINT32(ACPI_TYPE_ANY); } return_UINT32((acpi_object_type) node->type); } /******************************************************************************* * * FUNCTION: acpi_ns_local * * PARAMETERS: Type - A namespace object type * * RETURN: LOCAL if names must be found locally in objects of the * passed type, 0 if enclosing scopes should be searched * * DESCRIPTION: Returns scope rule for the given object type. 
 *
 ******************************************************************************/

u32 acpi_ns_local(acpi_object_type type)
{
	ACPI_FUNCTION_TRACE(ns_local);

	if (!acpi_ut_valid_object_type(type)) {

		/* Type code out of range */

		ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
		return_UINT32(ACPI_NS_NORMAL);
	}

	/* The properties table entry is a bit mask; isolate the LOCAL flag */

	return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_internal_name_length
 *
 * PARAMETERS:  Info            - Info struct initialized with the
 *                                external name pointer.
 *
 * RETURN:      None (results are stored back into Info: num_carats,
 *              num_segments, fully_qualified, length, next_external_char)
 *
 * DESCRIPTION: Calculate the length of the internal (AML) namestring
 *              corresponding to the external (ASL) namestring.
 *
 ******************************************************************************/

void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
{
	const char *next_external_char;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	next_external_char = info->external_name;
	info->num_carats = 0;
	info->num_segments = 0;
	info->fully_qualified = FALSE;

	/*
	 * For the internal name, the required length is 4 bytes per segment, plus
	 * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null
	 * (which is not really needed, but no there's harm in putting it there)
	 *
	 * strlen() + 1 covers the first name_seg, which has no path separator
	 */
	if (acpi_ns_valid_root_prefix(*next_external_char)) {
		info->fully_qualified = TRUE;
		next_external_char++;

		/* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */

		while (acpi_ns_valid_root_prefix(*next_external_char)) {
			next_external_char++;
		}
	} else {
		/* Handle Carat prefixes (each '^' means "go up one scope") */

		while (*next_external_char == '^') {
			info->num_carats++;
			next_external_char++;
		}
	}

	/*
	 * Determine the number of ACPI name "segments" by counting the number of
	 * path separators within the string. Start with one segment since the
	 * segment count is [(# separators) + 1], and zero separators is ok.
	 */
	if (*next_external_char) {
		info->num_segments = 1;
		for (i = 0; next_external_char[i]; i++) {
			if (acpi_ns_valid_path_separator(next_external_char[i])) {
				info->num_segments++;
			}
		}
	}

	/* 4 bytes per segment + prefix/count/terminator bytes + one '^' each */

	info->length = (ACPI_NAME_SIZE * info->num_segments) +
	    4 + info->num_carats;

	info->next_external_char = next_external_char;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_build_internal_name
 *
 * PARAMETERS:  Info            - Info struct fully initialized
 *                                (by acpi_ns_get_internal_name_length)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Construct the internal (AML) namestring
 *              corresponding to the external (ASL) namestring.
 *
 ******************************************************************************/

acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
{
	u32 num_segments = info->num_segments;
	char *internal_name = info->internal_name;
	const char *external_name = info->next_external_char;
	char *result = NULL;
	u32 i;

	ACPI_FUNCTION_TRACE(ns_build_internal_name);

	/* Setup the correct prefixes, counts, and pointers */

	if (info->fully_qualified) {
		internal_name[0] = '\\';

		/* Encoding depends on segment count: none/one, dual, or multi */

		if (num_segments <= 1) {
			result = &internal_name[1];
		} else if (num_segments == 2) {
			internal_name[1] = AML_DUAL_NAME_PREFIX;
			result = &internal_name[2];
		} else {
			internal_name[1] = AML_MULTI_NAME_PREFIX_OP;
			internal_name[2] = (char)num_segments;
			result = &internal_name[3];
		}
	} else {
		/*
		 * Not fully qualified.
		 * Handle Carats first, then append the name segments
		 */
		i = 0;
		if (info->num_carats) {
			for (i = 0; i < info->num_carats; i++) {
				internal_name[i] = '^';
			}
		}

		/* After the loop, i == num_carats: segments start past the carats */

		if (num_segments <= 1) {
			result = &internal_name[i];
		} else if (num_segments == 2) {
			internal_name[i] = AML_DUAL_NAME_PREFIX;
			result = &internal_name[(acpi_size) i + 1];
		} else {
			internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
			internal_name[(acpi_size) i + 1] = (char)num_segments;
			result = &internal_name[(acpi_size) i + 2];
		}
	}

	/* Build the name (minus path separators) */

	for (; num_segments; num_segments--) {
		for (i = 0; i < ACPI_NAME_SIZE; i++) {
			if (acpi_ns_valid_path_separator(*external_name) ||
			    (*external_name == 0)) {

				/* Pad the segment with underscore(s) if segment is short */

				result[i] = '_';
			} else {
				/* Convert the character to uppercase and save it */

				result[i] =
				    (char)ACPI_TOUPPER((int)*external_name);
				external_name++;
			}
		}

		/* Now we must have a path separator, or the pathname is bad */

		if (!acpi_ns_valid_path_separator(*external_name) &&
		    (*external_name != 0)) {
			return_ACPI_STATUS(AE_BAD_PATHNAME);
		}

		/* Move on the next segment */

		external_name++;
		result += ACPI_NAME_SIZE;
	}

	/* Terminate the string */

	*result = 0;

	if (info->fully_qualified) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Returning [%p] (abs) \"\\%s\"\n",
				  internal_name, internal_name));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n",
				  internal_name, internal_name));
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_internalize_name
 *
 * PARAMETERS:  *external_name          - External representation of name
 *              **Converted Name        - Where to return the resulting
 *                                        internal represention of the name
 *                                        (caller owns and must free it)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0")
 *              to internal form (e.g.
5c 2f 02 5f 50 52 5f 43 50 55 30)
 *
 ******************************************************************************/

acpi_status
acpi_ns_internalize_name(const char *external_name, char **converted_name)
{
	struct acpi_namestring_info info;
	acpi_status status;
	char *buffer;

	ACPI_FUNCTION_TRACE(ns_internalize_name);

	/* Both pointers must be valid, and the external name non-empty */

	if (!external_name || (*external_name == 0) || !converted_name) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Determine how large the internal (AML) form will be */

	info.external_name = external_name;
	acpi_ns_get_internal_name_length(&info);

	/* Allocate a zeroed buffer sized for the internal name */

	buffer = ACPI_ALLOCATE_ZEROED(info.length);
	if (!buffer) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Construct the name; hand the buffer to the caller only on success */

	info.internal_name = buffer;
	status = acpi_ns_build_internal_name(&info);
	if (ACPI_SUCCESS(status)) {
		*converted_name = buffer;
	} else {
		ACPI_FREE(buffer);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_externalize_name
 *
 * PARAMETERS:  internal_name_length    - Length of the internal name below
 *              internal_name           - Internal representation of name
 *              converted_name_length   - Where the length is returned
 *              converted_name          - Where the resulting external name
 *                                        is returned (caller must free)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
 *              to its external (printable) form (e.g.
"\_PR_.CPU0") * ******************************************************************************/ acpi_status acpi_ns_externalize_name(u32 internal_name_length, const char *internal_name, u32 * converted_name_length, char **converted_name) { u32 names_index = 0; u32 num_segments = 0; u32 required_length; u32 prefix_length = 0; u32 i = 0; u32 j = 0; ACPI_FUNCTION_TRACE(ns_externalize_name); if (!internal_name_length || !internal_name || !converted_name) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Check for a prefix (one '\' | one or more '^') */ switch (internal_name[0]) { case '\\': prefix_length = 1; break; case '^': for (i = 0; i < internal_name_length; i++) { if (internal_name[i] == '^') { prefix_length = i + 1; } else { break; } } if (i == internal_name_length) { prefix_length = i; } break; default: break; } /* * Check for object names. Note that there could be 0-255 of these * 4-byte elements. */ if (prefix_length < internal_name_length) { switch (internal_name[prefix_length]) { case AML_MULTI_NAME_PREFIX_OP: /* <count> 4-byte names */ names_index = prefix_length + 2; num_segments = (u8) internal_name[(acpi_size) prefix_length + 1]; break; case AML_DUAL_NAME_PREFIX: /* Two 4-byte names */ names_index = prefix_length + 1; num_segments = 2; break; case 0: /* null_name */ names_index = 0; num_segments = 0; break; default: /* one 4-byte name */ names_index = prefix_length; num_segments = 1; break; } } /* * Calculate the length of converted_name, which equals the length * of the prefix, length of all object names, length of any required * punctuation ('.') between object names, plus the NULL terminator. */ required_length = prefix_length + (4 * num_segments) + ((num_segments > 0) ? (num_segments - 1) : 0) + 1; /* * Check to see if we're still in bounds. If not, there's a problem * with internal_name (invalid format). 
*/ if (required_length > internal_name_length) { ACPI_ERROR((AE_INFO, "Invalid internal name")); return_ACPI_STATUS(AE_BAD_PATHNAME); } /* Build the converted_name */ *converted_name = ACPI_ALLOCATE_ZEROED(required_length); if (!(*converted_name)) { return_ACPI_STATUS(AE_NO_MEMORY); } j = 0; for (i = 0; i < prefix_length; i++) { (*converted_name)[j++] = internal_name[i]; } if (num_segments > 0) { for (i = 0; i < num_segments; i++) { if (i > 0) { (*converted_name)[j++] = '.'; } (*converted_name)[j++] = internal_name[names_index++]; (*converted_name)[j++] = internal_name[names_index++]; (*converted_name)[j++] = internal_name[names_index++]; (*converted_name)[j++] = internal_name[names_index++]; } } if (converted_name_length) { *converted_name_length = (u32) required_length; } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_validate_handle * * PARAMETERS: Handle - Handle to be validated and typecast to a * namespace node. * * RETURN: A pointer to a namespace node * * DESCRIPTION: Convert a namespace handle to a namespace node. Handles special * cases for the root node. * * NOTE: Real integer handles would allow for more verification * and keep all pointers within this subsystem - however this introduces * more overhead and has not been necessary to this point. Drivers * holding handles are typically notified before a node becomes invalid * due to a table unload. 
 *
 ******************************************************************************/

struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{

	ACPI_FUNCTION_ENTRY();

	/* Parameter validation: NULL and the root sentinel both map to the root */

	if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
		return (acpi_gbl_root_node);
	}

	/* We can at least attempt to verify the handle */

	if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
		return (NULL);
	}

	return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_terminate
 *
 * PARAMETERS:  none
 *
 * RETURN:      none
 *
 * DESCRIPTION: free memory allocated for namespace and ACPI table storage.
 *
 ******************************************************************************/

void acpi_ns_terminate(void)
{
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_TRACE(ns_terminate);

	/*
	 * 1) Free the entire namespace -- all nodes and objects
	 *
	 * Delete all object descriptors attached to namespace nodes
	 */
	acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);

	/* Detach any objects attached to the root */

	obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
	if (obj_desc) {
		acpi_ns_detach_object(acpi_gbl_root_node);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_opens_scope
 *
 * PARAMETERS:  Type            - A valid namespace type
 *
 * RETURN:      NEWSCOPE if the passed type "opens a name scope" according
 *              to the ACPI specification, else 0
 *
 ******************************************************************************/

u32 acpi_ns_opens_scope(acpi_object_type type)
{
	ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));

	if (!acpi_ut_valid_object_type(type)) {

		/* type code out of range */

		ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
		return_UINT32(ACPI_NS_NORMAL);
	}

	/* The properties table entry is a bit mask; isolate the NEWSCOPE flag */

	return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_node
 *
 * PARAMETERS:  *Pathname   - Name to be found, in external (ASL) format. The
 *                            \ (backslash) and ^ (carat) prefixes, and the
 *                            . (period) to separate segments are supported.
 *              prefix_node  - Root of subtree to be searched, or NS_ALL for the
 *                            root of the name space. If Name is fully
 *                            qualified (first s8 is '\'), the passed value
 *                            of Scope will not be accessed.
 *              Flags       - Used to indicate whether to perform upsearch or
 *                            not.
 *              return_node - Where the Node is returned
 *
 * DESCRIPTION: Look up a name relative to a given scope and return the
 *              corresponding Node. NOTE: Scope can be null.
 *
 * MUTEX:       Locks namespace
 *
 ******************************************************************************/

acpi_status
acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
		 const char *pathname,
		 u32 flags, struct acpi_namespace_node **return_node)
{
	union acpi_generic_state scope_info;
	acpi_status status;
	char *internal_path;

	ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));

	/* A NULL pathname means "the prefix node itself" (or the root) */

	if (!pathname) {
		*return_node = prefix_node;
		if (!prefix_node) {
			*return_node = acpi_gbl_root_node;
		}

		return_ACPI_STATUS(AE_OK);
	}

	/* Convert path to internal representation */

	status = acpi_ns_internalize_name(pathname, &internal_path);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Must lock namespace during lookup */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		/* internal_path was allocated above; cleanup frees it */

		goto cleanup;
	}

	/* Setup lookup scope (search starting point) */

	scope_info.scope.node = prefix_node;

	/* Lookup the name in the namespace */

	status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
				ACPI_IMODE_EXECUTE,
				(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
				return_node);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s, %s\n",
				  pathname, acpi_format_exception(status)));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

cleanup:
	ACPI_FREE(internal_path);
	return_ACPI_STATUS(status);
}
gpl-2.0
Red680812/android_44_KitKat_kernel_htc_dlxpul-1
drivers/acpi/acpica/nsutils.c
4852
21068
/****************************************************************************** * * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "amlcode.h" #include "actables.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsutils") /* Local prototypes */ static u8 acpi_ns_valid_path_separator(char sep); #ifdef ACPI_OBSOLETE_FUNCTIONS acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search); #endif /******************************************************************************* * * FUNCTION: acpi_ns_print_node_pathname * * PARAMETERS: Node - Object * Message - Prefix message * * DESCRIPTION: Print an object's full namespace pathname * Manages allocation/freeing of a pathname buffer * ******************************************************************************/ void acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *message) { struct acpi_buffer buffer; acpi_status status; if (!node) { acpi_os_printf("[NULL NAME]"); return; } /* Convert handle to full pathname and print it (with supplied message) */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; status = acpi_ns_handle_to_pathname(node, &buffer); if (ACPI_SUCCESS(status)) { if (message) { acpi_os_printf("%s ", message); } acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node); ACPI_FREE(buffer.pointer); } } /******************************************************************************* * * FUNCTION: acpi_ns_valid_root_prefix * * PARAMETERS: Prefix - Character to be checked * * RETURN: TRUE if a valid prefix * * DESCRIPTION: Check if 
a character is a valid ACPI Root prefix * ******************************************************************************/ u8 acpi_ns_valid_root_prefix(char prefix) { return ((u8) (prefix == '\\')); } /******************************************************************************* * * FUNCTION: acpi_ns_valid_path_separator * * PARAMETERS: Sep - Character to be checked * * RETURN: TRUE if a valid path separator * * DESCRIPTION: Check if a character is a valid ACPI path separator * ******************************************************************************/ static u8 acpi_ns_valid_path_separator(char sep) { return ((u8) (sep == '.')); } /******************************************************************************* * * FUNCTION: acpi_ns_get_type * * PARAMETERS: Node - Parent Node to be examined * * RETURN: Type field from Node whose handle is passed * * DESCRIPTION: Return the type of a Namespace node * ******************************************************************************/ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node) { ACPI_FUNCTION_TRACE(ns_get_type); if (!node) { ACPI_WARNING((AE_INFO, "Null Node parameter")); return_UINT32(ACPI_TYPE_ANY); } return_UINT32((acpi_object_type) node->type); } /******************************************************************************* * * FUNCTION: acpi_ns_local * * PARAMETERS: Type - A namespace object type * * RETURN: LOCAL if names must be found locally in objects of the * passed type, 0 if enclosing scopes should be searched * * DESCRIPTION: Returns scope rule for the given object type. 
* ******************************************************************************/ u32 acpi_ns_local(acpi_object_type type) { ACPI_FUNCTION_TRACE(ns_local); if (!acpi_ut_valid_object_type(type)) { /* Type code out of range */ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type)); return_UINT32(ACPI_NS_NORMAL); } return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL); } /******************************************************************************* * * FUNCTION: acpi_ns_get_internal_name_length * * PARAMETERS: Info - Info struct initialized with the * external name pointer. * * RETURN: None * * DESCRIPTION: Calculate the length of the internal (AML) namestring * corresponding to the external (ASL) namestring. * ******************************************************************************/ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) { const char *next_external_char; u32 i; ACPI_FUNCTION_ENTRY(); next_external_char = info->external_name; info->num_carats = 0; info->num_segments = 0; info->fully_qualified = FALSE; /* * For the internal name, the required length is 4 bytes per segment, plus * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null * (which is not really needed, but no there's harm in putting it there) * * strlen() + 1 covers the first name_seg, which has no path separator */ if (acpi_ns_valid_root_prefix(*next_external_char)) { info->fully_qualified = TRUE; next_external_char++; /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */ while (acpi_ns_valid_root_prefix(*next_external_char)) { next_external_char++; } } else { /* Handle Carat prefixes */ while (*next_external_char == '^') { info->num_carats++; next_external_char++; } } /* * Determine the number of ACPI name "segments" by counting the number of * path separators within the string. Start with one segment since the * segment count is [(# separators) + 1], and zero separators is ok. 
*/ if (*next_external_char) { info->num_segments = 1; for (i = 0; next_external_char[i]; i++) { if (acpi_ns_valid_path_separator(next_external_char[i])) { info->num_segments++; } } } info->length = (ACPI_NAME_SIZE * info->num_segments) + 4 + info->num_carats; info->next_external_char = next_external_char; } /******************************************************************************* * * FUNCTION: acpi_ns_build_internal_name * * PARAMETERS: Info - Info struct fully initialized * * RETURN: Status * * DESCRIPTION: Construct the internal (AML) namestring * corresponding to the external (ASL) namestring. * ******************************************************************************/ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) { u32 num_segments = info->num_segments; char *internal_name = info->internal_name; const char *external_name = info->next_external_char; char *result = NULL; u32 i; ACPI_FUNCTION_TRACE(ns_build_internal_name); /* Setup the correct prefixes, counts, and pointers */ if (info->fully_qualified) { internal_name[0] = '\\'; if (num_segments <= 1) { result = &internal_name[1]; } else if (num_segments == 2) { internal_name[1] = AML_DUAL_NAME_PREFIX; result = &internal_name[2]; } else { internal_name[1] = AML_MULTI_NAME_PREFIX_OP; internal_name[2] = (char)num_segments; result = &internal_name[3]; } } else { /* * Not fully qualified. 
* Handle Carats first, then append the name segments */ i = 0; if (info->num_carats) { for (i = 0; i < info->num_carats; i++) { internal_name[i] = '^'; } } if (num_segments <= 1) { result = &internal_name[i]; } else if (num_segments == 2) { internal_name[i] = AML_DUAL_NAME_PREFIX; result = &internal_name[(acpi_size) i + 1]; } else { internal_name[i] = AML_MULTI_NAME_PREFIX_OP; internal_name[(acpi_size) i + 1] = (char)num_segments; result = &internal_name[(acpi_size) i + 2]; } } /* Build the name (minus path separators) */ for (; num_segments; num_segments--) { for (i = 0; i < ACPI_NAME_SIZE; i++) { if (acpi_ns_valid_path_separator(*external_name) || (*external_name == 0)) { /* Pad the segment with underscore(s) if segment is short */ result[i] = '_'; } else { /* Convert the character to uppercase and save it */ result[i] = (char)ACPI_TOUPPER((int)*external_name); external_name++; } } /* Now we must have a path separator, or the pathname is bad */ if (!acpi_ns_valid_path_separator(*external_name) && (*external_name != 0)) { return_ACPI_STATUS(AE_BAD_PATHNAME); } /* Move on the next segment */ external_name++; result += ACPI_NAME_SIZE; } /* Terminate the string */ *result = 0; if (info->fully_qualified) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (abs) \"\\%s\"\n", internal_name, internal_name)); } else { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n", internal_name, internal_name)); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_internalize_name * * PARAMETERS: *external_name - External representation of name * **Converted Name - Where to return the resulting * internal represention of the name * * RETURN: Status * * DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0") * to internal form (e.g. 
5c 2f 02 5f 50 52 5f 43 50 55 30) * *******************************************************************************/ acpi_status acpi_ns_internalize_name(const char *external_name, char **converted_name) { char *internal_name; struct acpi_namestring_info info; acpi_status status; ACPI_FUNCTION_TRACE(ns_internalize_name); if ((!external_name) || (*external_name == 0) || (!converted_name)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Get the length of the new internal name */ info.external_name = external_name; acpi_ns_get_internal_name_length(&info); /* We need a segment to store the internal name */ internal_name = ACPI_ALLOCATE_ZEROED(info.length); if (!internal_name) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Build the name */ info.internal_name = internal_name; status = acpi_ns_build_internal_name(&info); if (ACPI_FAILURE(status)) { ACPI_FREE(internal_name); return_ACPI_STATUS(status); } *converted_name = internal_name; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_externalize_name * * PARAMETERS: internal_name_length - Lenth of the internal name below * internal_name - Internal representation of name * converted_name_length - Where the length is returned * converted_name - Where the resulting external name * is returned * * RETURN: Status * * DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30) * to its external (printable) form (e.g. 
"\_PR_.CPU0") * ******************************************************************************/ acpi_status acpi_ns_externalize_name(u32 internal_name_length, const char *internal_name, u32 * converted_name_length, char **converted_name) { u32 names_index = 0; u32 num_segments = 0; u32 required_length; u32 prefix_length = 0; u32 i = 0; u32 j = 0; ACPI_FUNCTION_TRACE(ns_externalize_name); if (!internal_name_length || !internal_name || !converted_name) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Check for a prefix (one '\' | one or more '^') */ switch (internal_name[0]) { case '\\': prefix_length = 1; break; case '^': for (i = 0; i < internal_name_length; i++) { if (internal_name[i] == '^') { prefix_length = i + 1; } else { break; } } if (i == internal_name_length) { prefix_length = i; } break; default: break; } /* * Check for object names. Note that there could be 0-255 of these * 4-byte elements. */ if (prefix_length < internal_name_length) { switch (internal_name[prefix_length]) { case AML_MULTI_NAME_PREFIX_OP: /* <count> 4-byte names */ names_index = prefix_length + 2; num_segments = (u8) internal_name[(acpi_size) prefix_length + 1]; break; case AML_DUAL_NAME_PREFIX: /* Two 4-byte names */ names_index = prefix_length + 1; num_segments = 2; break; case 0: /* null_name */ names_index = 0; num_segments = 0; break; default: /* one 4-byte name */ names_index = prefix_length; num_segments = 1; break; } } /* * Calculate the length of converted_name, which equals the length * of the prefix, length of all object names, length of any required * punctuation ('.') between object names, plus the NULL terminator. */ required_length = prefix_length + (4 * num_segments) + ((num_segments > 0) ? (num_segments - 1) : 0) + 1; /* * Check to see if we're still in bounds. If not, there's a problem * with internal_name (invalid format). 
*/ if (required_length > internal_name_length) { ACPI_ERROR((AE_INFO, "Invalid internal name")); return_ACPI_STATUS(AE_BAD_PATHNAME); } /* Build the converted_name */ *converted_name = ACPI_ALLOCATE_ZEROED(required_length); if (!(*converted_name)) { return_ACPI_STATUS(AE_NO_MEMORY); } j = 0; for (i = 0; i < prefix_length; i++) { (*converted_name)[j++] = internal_name[i]; } if (num_segments > 0) { for (i = 0; i < num_segments; i++) { if (i > 0) { (*converted_name)[j++] = '.'; } (*converted_name)[j++] = internal_name[names_index++]; (*converted_name)[j++] = internal_name[names_index++]; (*converted_name)[j++] = internal_name[names_index++]; (*converted_name)[j++] = internal_name[names_index++]; } } if (converted_name_length) { *converted_name_length = (u32) required_length; } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_validate_handle * * PARAMETERS: Handle - Handle to be validated and typecast to a * namespace node. * * RETURN: A pointer to a namespace node * * DESCRIPTION: Convert a namespace handle to a namespace node. Handles special * cases for the root node. * * NOTE: Real integer handles would allow for more verification * and keep all pointers within this subsystem - however this introduces * more overhead and has not been necessary to this point. Drivers * holding handles are typically notified before a node becomes invalid * due to a table unload. 
 *
 ******************************************************************************/

struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{

	ACPI_FUNCTION_ENTRY();

	/* Parameter validation: NULL and the distinguished root handle
	 * both resolve to the global root node. */

	if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
		return (acpi_gbl_root_node);
	}

	/* We can at least attempt to verify the handle */

	if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
		return (NULL);
	}

	/* Descriptor type says it is a named node; cast and hand it back */

	return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_terminate
 *
 * PARAMETERS:  none
 *
 * RETURN:      none
 *
 * DESCRIPTION: free memory allocated for namespace and ACPI table storage.
 *
 ******************************************************************************/

void acpi_ns_terminate(void)
{
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_TRACE(ns_terminate);

	/*
	 * 1) Free the entire namespace -- all nodes and objects
	 *
	 * Delete all object descriptors attached to namespace nodes
	 */
	acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);

	/* Detach any objects attached to the root */

	obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
	if (obj_desc) {
		acpi_ns_detach_object(acpi_gbl_root_node);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_opens_scope
 *
 * PARAMETERS:  Type        - A valid namespace type
 *
 * RETURN:      NEWSCOPE if the passed type "opens a name scope" according
 *              to the ACPI specification, else 0
 *
 ******************************************************************************/

u32 acpi_ns_opens_scope(acpi_object_type type)
{
	ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));

	if (!acpi_ut_valid_object_type(type)) {

		/* type code out of range  */

		ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
		return_UINT32(ACPI_NS_NORMAL);
	}

	/* Scope-opening behavior is a per-type property bit */

	return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_node
 *
 * PARAMETERS:  *Pathname   - Name to be found, in external (ASL) format. The
 *                            \ (backslash) and ^ (carat) prefixes, and the
 *                            . (period) to separate segments are supported.
 *              prefix_node  - Root of subtree to be searched, or NS_ALL for the
 *                            root of the name space. If Name is fully
 *                            qualified (first s8 is '\'), the passed value
 *                            of Scope will not be accessed.
 *              Flags       - Used to indicate whether to perform upsearch or
 *                            not.
 *              return_node - Where the Node is returned
 *
 * DESCRIPTION: Look up a name relative to a given scope and return the
 *              corresponding Node. NOTE: Scope can be null.
 *
 * MUTEX:       Locks namespace
 *
 ******************************************************************************/

acpi_status
acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
		 const char *pathname,
		 u32 flags, struct acpi_namespace_node **return_node)
{
	union acpi_generic_state scope_info;
	acpi_status status;
	char *internal_path;

	ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));

	/* A NULL pathname means "the prefix node itself" (or the root
	 * node when no prefix was supplied either) */

	if (!pathname) {
		*return_node = prefix_node;
		if (!prefix_node) {
			*return_node = acpi_gbl_root_node;
		}
		return_ACPI_STATUS(AE_OK);
	}

	/* Convert path to internal representation */

	status = acpi_ns_internalize_name(pathname, &internal_path);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Must lock namespace during lookup */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		/* Mutex acquisition failed; free the internalized path */
		goto cleanup;
	}

	/* Setup lookup scope (search starting point) */

	scope_info.scope.node = prefix_node;

	/* Lookup the name in the namespace */

	status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
				ACPI_IMODE_EXECUTE,
				(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
				return_node);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s, %s\n",
				  pathname, acpi_format_exception(status)));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

cleanup:
	ACPI_FREE(internal_path);
	return_ACPI_STATUS(status);
}
gpl-2.0
MinimalOS-AOSP/kernel_lge_hammerhead
arch/arm/plat-omap/fb.c
4852
2636
/*
 * File: arch/arm/plat-omap/fb.c
 *
 * Framebuffer device registration for TI OMAP platforms
 *
 * Copyright (C) 2006 Nokia Corporation
 * Author: Imre Deak <imre.deak@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/omapfb.h>

#include <mach/hardware.h>
#include <asm/mach/map.h>

#include <plat/board.h>

#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)

/* Set once a board file supplies an LCD config via omapfb_set_lcd_config();
 * the omapfb platform device is only registered when this is true. */
static bool omapfb_lcd_configured;
static struct omapfb_platform_data omapfb_config;

/* omapfb is a 32-bit DMA device */
static u64 omap_fb_dma_mask = ~(u32)0;

static struct platform_device omap_fb_device = {
	.name		= "omapfb",
	.id		= -1,
	.dev = {
		.dma_mask		= &omap_fb_dma_mask,
		.coherent_dma_mask	= ~(u32)0,
		.platform_data		= &omapfb_config,
	},
	.num_resources = 0,
};

/*
 * Called from board init code to hand the LCD configuration to the
 * omapfb driver. Copies the config, so the caller's structure may be
 * __initdata.
 */
void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
{
	omapfb_config.lcd = *config;
	omapfb_lcd_configured = true;
}

static int __init omap_init_fb(void)
{
	/*
	 * If the board file has not set the lcd config with
	 * omapfb_set_lcd_config(), don't bother registering the omapfb device
	 */
	if (!omapfb_lcd_configured)
		return 0;

	return platform_device_register(&omap_fb_device);
}

arch_initcall(omap_init_fb);

#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)

static u64 omap_fb_dma_mask = ~(u32)0;
static struct omapfb_platform_data omapfb_config;

static struct platform_device omap_fb_device = {
	.name		= "omapfb",
	.id		= -1,
	.dev = {
		.dma_mask		= &omap_fb_dma_mask,
		.coherent_dma_mask	= ~(u32)0,
		.platform_data		= &omapfb_config,
	},
	.num_resources = 0,
};

/* DSS2 (omapfb2) variant: the device is registered unconditionally;
 * the panel configuration comes from the DSS subsystem instead. */
static int __init omap_init_fb(void)
{
	return platform_device_register(&omap_fb_device);
}

arch_initcall(omap_init_fb);

#else

/* No framebuffer driver configured: keep the board-file hook as a stub */
void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
{
}

#endif
gpl-2.0
haydenbbickerton/zaracl_kernel
arch/microblaze/lib/memmove.c
7668
5442
/* * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memcpy on Microblaze * This is generic C code to do efficient, alignment-aware memmove. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/string.h> #ifdef __HAVE_ARCH_MEMMOVE #ifndef CONFIG_OPT_LIB_FUNCTION void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* copy backwards, from end to beginning */ src += c; dst += c; /* Simple, byte oriented memmove. 
*/ while (c--) *--dst = *--src; return v_dst; } #else /* CONFIG_OPT_LIB_FUNCTION */ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; const uint32_t *i_src; uint32_t *i_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on * different boundaries shifts will be necessary. This might result in * bad performance on MicroBlaze systems without a barrel shifter. */ /* FIXME this part needs more test */ /* Do a descending copy - this is a bit trickier! */ dst += c; src += c; if (c >= 4) { unsigned value, buf_hold; /* Align the destination to a word boundary. */ /* This is done in an endian independent manner. */ switch ((unsigned long)dst & 3) { case 3: *--dst = *--src; --c; case 2: *--dst = *--src; --c; case 1: *--dst = *--src; --c; } i_dst = (void *)dst; /* Choose a copy scheme based on the source */ /* alignment relative to dstination. 
*/ switch ((unsigned long)src & 3) { case 0x0: /* Both byte offsets are aligned */ i_src = (const void *)src; for (; c >= 4; c -= 4) *--i_dst = *--i_src; src = (const void *)i_src; break; case 0x1: /* Unaligned - Off by 1 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 8 | value; buf_hold = value >> 24; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFF) << 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFFFF00)>>8); buf_hold = (value & 0xFF) << 24; } #endif /* Realign the source */ src = (const void *)i_src; src += 1; break; case 0x2: /* Unaligned - Off by 2 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 16 | value; buf_hold = value >> 16; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFF) << 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFF0000)>>16); buf_hold = (value & 0xFFFF) << 16; } #endif /* Realign the source */ src = (const void *)i_src; src += 2; break; case 0x3: /* Unaligned - Off by 3 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 24 | value; buf_hold = value >> 8; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFFFF) << 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFF000000)>> 24); buf_hold = (value & 0xFFFFFF) << 8; } #endif /* Realign the source */ src = (const void *)i_src; src += 3; break; } dst = (void *)i_dst; } /* simple fast copy, ... 
unless a cache boundary is crossed */ /* Finish off any remaining bytes */ switch (c) { case 4: *--dst = *--src; case 3: *--dst = *--src; case 2: *--dst = *--src; case 1: *--dst = *--src; } return v_dst; } #endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memmove); #endif /* __HAVE_ARCH_MEMMOVE */
gpl-2.0
TeamWin/android_kernel_oneplus_msm8974
drivers/scsi/sun_esp.c
8180
14539
/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

/* Accessors for the companion DVMA controller's registers.
 * Both expect a local 'esp' pointer to be in scope. */
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

/* Map the DVMA register bank and identify the DVMA chip revision
 * from the DMA_CSR device-ID field. */
static int __devinit esp_sbus_setup_dma(struct esp *esp,
					struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

/* Map the ESP chip's own register bank. */
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

/* Allocate the 16-byte DMA-coherent command block used by the core. */
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct platform_device *op = esp->dev;

	esp->command_block = dma_alloc_coherent(&op->dev, 16,
						&esp->command_block_dma,
						GFP_ATOMIC);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

/* Hook up the first OF-provided interrupt to the shared ESP handler. */
static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = esp->dev;

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

/* Determine our SCSI initiator ID: try the ESP node's properties first,
 * then fall back to the DVMA node, and finally to 7. */
static void __devinit esp_get_scsi_id(struct esp *esp,
				      struct platform_device *espdma)
{
	struct platform_device *op = esp->dev;
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

/* Set or clear the differential-bus flag from the OF property. */
static void __devinit esp_get_differential(struct esp *esp)
{
	struct platform_device *op = esp->dev;
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

/* Read the input clock frequency from the ESP node or its parent bus. */
static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = esp->dev;
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

/* Compute the usable burst sizes as the intersection of the ESP node,
 * DVMA node, and parent-bus "burst-sizes" properties. */
static void __devinit esp_get_bursts(struct esp *esp,
				     struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = esp->dev;
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);

	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	/* Fall back to "everything below burst-32" when the properties are
	 * absent or exclude the sizes we require. */
	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

/* Gather all OF-described properties needed before chip init. */
static void __devinit esp_sbus_get_props(struct esp *esp,
					 struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

/* ESP registers are longword-spaced on SBUS, hence the * 4UL. */
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	struct platform_device *op = esp->dev;

	return dma_map_single(&op->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	return dma_map_sg(&op->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_single(&op->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_sg(&op->dev, sg, num_sg, dir);
}

/* An interrupt is pending when the DVMA reports either a handled
 * interrupt or an error condition. */
static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

/* Reset and (re)configure the DVMA engine for the detected revision. */
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		/* Wait (bounded) for any pending read to finish before
		 * programming the CSR. */
		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

/* Drain the DVMA FIFO to memory; not needed/possible on HME. */
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	/* rev3 and ESC1 drain automatically; others need the kick */
	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

/* Invalidate the DVMA FIFO after a transfer is aborted/complete. */
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

/* Program transfer counters and DMA registers, then issue the command.
 * Note the ordering difference: HME issues the ESP command before
 * enabling DMA, older chips enable DMA first. */
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			/* ESC1 wants the byte count rounded out to the
			 * page boundary past the end of the transfer. */
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}

}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.map_single	=	sbus_esp_map_single,
	.map_sg		=	sbus_esp_map_sg,
	.unmap_single	=	sbus_esp_unmap_single,
	.unmap_sg	=	sbus_esp_unmap_sg,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

/* Allocate the Scsi_Host, map all resources, and register with the
 * shared ESP core. Unwinds via the goto chain on failure. */
static int __devinit esp_sbus_probe_one(struct platform_device *op,
					struct platform_device *espdma,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = op;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp, &op->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

/* Find the companion DVMA node (parent "espdma"/"dma", or the node
 * itself for "SUNW,fas" HME parts), then probe the pair. */
static int __devinit esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_node = dp->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	return esp_sbus_probe_one(op, dma_of, hme);
}

/* Tear down in reverse order of probe: unregister, mask DVMA
 * interrupts, release IRQ, DMA memory, and register mappings. */
static int __devexit esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.owner = THIS_MODULE,
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return platform_driver_register(&esp_sbus_driver);
}

static void __exit sunesp_exit(void)
{
	platform_driver_unregister(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);
gpl-2.0
poondog/KANGAROO-kernel
arch/alpha/kernel/core_wildfire.c
13812
17607
/* * linux/arch/alpha/kernel/core_wildfire.c * * Wildfire support. * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_wildfire.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/smp.h> #include "proto.h" #include "pci_impl.h" #define DEBUG_CONFIG 0 #define DEBUG_DUMP_REGS 0 #define DEBUG_DUMP_CONFIG 1 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif #if DEBUG_DUMP_REGS static void wildfire_dump_pci_regs(int qbbno, int hoseno); static void wildfire_dump_pca_regs(int qbbno, int pcano); static void wildfire_dump_qsa_regs(int qbbno); static void wildfire_dump_qsd_regs(int qbbno); static void wildfire_dump_iop_regs(int qbbno); static void wildfire_dump_gp_regs(int qbbno); #endif #if DEBUG_DUMP_CONFIG static void wildfire_dump_hardware_config(void); #endif unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB]; unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB]; #define QBB_MAP_EMPTY 0xff unsigned long wildfire_hard_qbb_mask; unsigned long wildfire_soft_qbb_mask; unsigned long wildfire_gp_mask; unsigned long wildfire_hs_mask; unsigned long wildfire_iop_mask; unsigned long wildfire_ior_mask; unsigned long wildfire_pca_mask; unsigned long wildfire_cpu_mask; unsigned long wildfire_mem_mask; void __init wildfire_init_hose(int qbbno, int hoseno) { struct pci_controller *hose; wildfire_pci *pci; hose = alloc_pci_controller(); hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* This is for userland consumption. 
*/ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno); hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno); hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno); hose->index = (qbbno << 3) + hoseno; hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS; hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1; hose->io_space->name = pci_io_names[hoseno]; hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS; hose->mem_space->end = hose->mem_space->start + 0xffffffff; hose->mem_space->name = pci_mem_names[hoseno]; hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n", qbbno, hoseno); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n", qbbno, hoseno); #if DEBUG_DUMP_REGS wildfire_dump_pci_regs(qbbno, hoseno); #endif /* * Set up the PCI to main memory translation windows. * * Note: Window 3 is scatter-gather only * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is direct access 1GB at 1GB * Window 2 is direct access 1GB at 2GB * Window 3 is scatter-gather 128MB at 3GB * ??? We ought to scale window 3 memory. 
* */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0); pci = WILDFIRE_pci(qbbno, hoseno); pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3; pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000; pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes); pci->pci_window[1].wbase.csr = 0x40000000 | 1; pci->pci_window[1].wmask.csr = (0x40000000 -1) & 0xfff00000; pci->pci_window[1].tbase.csr = 0; pci->pci_window[2].wbase.csr = 0x80000000 | 1; pci->pci_window[2].wmask.csr = (0x40000000 -1) & 0xfff00000; pci->pci_window[2].tbase.csr = 0x40000000; pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3; pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000; pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes); wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */ } void __init wildfire_init_pca(int qbbno, int pcano) { /* Test for PCA existence first. */ if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) return; #if DEBUG_DUMP_REGS wildfire_dump_pca_regs(qbbno, pcano); #endif /* Do both hoses of the PCA. */ wildfire_init_hose(qbbno, (pcano << 1) + 0); wildfire_init_hose(qbbno, (pcano << 1) + 1); } void __init wildfire_init_qbb(int qbbno) { int pcano; /* Test for QBB existence first. */ if (!WILDFIRE_QBB_EXISTS(qbbno)) return; #if DEBUG_DUMP_REGS wildfire_dump_qsa_regs(qbbno); wildfire_dump_qsd_regs(qbbno); wildfire_dump_iop_regs(qbbno); wildfire_dump_gp_regs(qbbno); #endif /* Init all PCAs here. 
*/ for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) { wildfire_init_pca(qbbno, pcano); } } void __init wildfire_hardware_probe(void) { unsigned long temp; unsigned int hard_qbb, soft_qbb; wildfire_fast_qsd *fast = WILDFIRE_fast_qsd(); wildfire_qsd *qsd; wildfire_qsa *qsa; wildfire_iop *iop; wildfire_gp *gp; wildfire_ne *ne; wildfire_fe *fe; int i; temp = fast->qsd_whami.csr; #if 0 printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp); #endif hard_qbb = (temp >> 8) & 7; soft_qbb = (temp >> 4) & 7; /* Init the HW configuration variables. */ wildfire_hard_qbb_mask = (1 << hard_qbb); wildfire_soft_qbb_mask = (1 << soft_qbb); wildfire_gp_mask = 0; wildfire_hs_mask = 0; wildfire_iop_mask = 0; wildfire_ior_mask = 0; wildfire_pca_mask = 0; wildfire_cpu_mask = 0; wildfire_mem_mask = 0; memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); /* First, determine which QBBs are present. */ qsa = WILDFIRE_qsa(soft_qbb); temp = qsa->qsa_qbb_id.csr; #if 0 printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp); #endif if (temp & 0x40) /* Is there an HS? */ wildfire_hs_mask = 1; if (temp & 0x20) { /* Is there a GP? */ gp = WILDFIRE_gp(soft_qbb); temp = 0; for (i = 0; i < 4; i++) { temp |= gp->gpa_qbb_map[i].csr << (i * 8); #if 0 printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n", i, gp, temp); #endif } for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) { if (temp & 8) { /* Is there a QBB? */ soft_qbb = temp & 7; wildfire_hard_qbb_mask |= (1 << hard_qbb); wildfire_soft_qbb_mask |= (1 << soft_qbb); } temp >>= 4; } wildfire_gp_mask = wildfire_soft_qbb_mask; } /* Next determine each QBBs resources. 
*/ for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) { if (WILDFIRE_QBB_EXISTS(soft_qbb)) { qsd = WILDFIRE_qsd(soft_qbb); temp = qsd->qsd_whami.csr; #if 0 printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp); #endif hard_qbb = (temp >> 8) & 7; wildfire_hard_qbb_map[hard_qbb] = soft_qbb; wildfire_soft_qbb_map[soft_qbb] = hard_qbb; qsa = WILDFIRE_qsa(soft_qbb); temp = qsa->qsa_qbb_pop[0].csr; #if 0 printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp); #endif wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2); wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); temp = qsa->qsa_qbb_pop[1].csr; #if 0 printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp); #endif wildfire_iop_mask |= (1 << soft_qbb); wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); temp = qsa->qsa_qbb_id.csr; #if 0 printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp); #endif if (temp & 0x20) wildfire_gp_mask |= (1 << soft_qbb); /* Probe for PCA existence here. */ for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) { iop = WILDFIRE_iop(soft_qbb); ne = WILDFIRE_ne(soft_qbb, i); fe = WILDFIRE_fe(soft_qbb, i); if ((iop->iop_hose[i].init.csr & 1) == 1 && ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) && ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL)) { wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i); } } } } #if DEBUG_DUMP_CONFIG wildfire_dump_hardware_config(); #endif } void __init wildfire_init_arch(void) { int qbbno; /* With multiple PCI buses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; /* Probe the hardware for info about configuration. */ wildfire_hardware_probe(); /* Now init all the found QBBs. */ for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) { wildfire_init_qbb(qbbno); } /* Normal direct PCI DMA mapping. 
*/ __direct_map_base = 0x40000000UL; __direct_map_size = 0x80000000UL; } void wildfire_machine_check(unsigned long vector, unsigned long la_ptr) { mb(); mb(); /* magic */ draina(); /* FIXME: clear pci errors */ wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "WILDFIRE", mcheck_expected(smp_processor_id())); } void wildfire_kill_arch(int mode) { } void wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { int qbbno = hose->index >> 3; int hoseno = hose->index & 7; wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno); mb(); pci->pci_flush_tlb.csr; /* reading does the trick */ } static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { struct pci_controller *hose = pbus->sysdata; unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (!pbus->parent) /* No parent means peer PCI bus. 
*/
		bus = 0;

	*type1 = (bus != 0);

	/* bus:16 | devfn:8 | register offset, relative to the hose's
	 * configuration-space base. */
	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

/* Read one byte/word/dword of PCI configuration space. */
static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Write one byte/word/dword of PCI configuration space.  Each store is
 * followed by a barrier and a read-back to push the write out to the bus.
 */
static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops wildfire_pci_ops =
{
	.read =		wildfire_read_config,
	.write =	wildfire_write_config,
};

/*
 * NUMA Support
 */
/* Node id lives in the physical-address bits above 64GB (1 << 36). */
int wildfire_pa_to_nid(unsigned long pa)
{
	return pa >> 36;
}

int wildfire_cpuid_to_nid(int cpuid)
{
	/* assume 4 CPUs per node */
	return cpuid >> 2;
}

unsigned long wildfire_node_mem_start(int nid)
{
	/* 64GB per node */
	return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}

unsigned long wildfire_node_mem_size(int nid)
{
	/* 64GB per node */
	return 64UL * 1024 * 1024 * 1024;
}

#if DEBUG_DUMP_REGS

/* Dump the per-hose PCI CSRs (debug builds only). */
static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
	int i;

	printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);

	printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
	       pci->pci_io_addr_ext.csr);
	printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr); printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr); printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr); printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr); printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr); printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n", qbbno, hoseno, pci); for (i = 0; i < 4; i++) { printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i, pci->pci_window[i].wbase.csr, pci->pci_window[i].wmask.csr, pci->pci_window[i].tbase.csr); } printk(KERN_ERR "\n"); } static void __init wildfire_dump_pca_regs(int qbbno, int pcano) { wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano); int i; printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n", qbbno, pcano, pca); printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr); printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr); printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr); printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr); printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n", pca->pca_stdio_edge_level.csr); printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n", qbbno, pcano, pca); for (i = 0; i < 4; i++) { printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i, pca->pca_int[i].target.csr, pca->pca_int[i].enable.csr); } printk(KERN_ERR "\n"); } static void __init wildfire_dump_qsa_regs(int qbbno) { wildfire_qsa *qsa = WILDFIRE_qsa(qbbno); int i; printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa); printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr); printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr); printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr); for (i = 0; i < 5; i++) printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n", i, qsa->qsa_config[i].csr); for (i = 0; i < 2; i++) printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n", i, qsa->qsa_qbb_pop[0].csr); 
printk(KERN_ERR "\n"); } static void __init wildfire_dump_qsd_regs(int qbbno) { wildfire_qsd *qsd = WILDFIRE_qsd(qbbno); printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd); printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr); printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr); printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n", qsd->qsd_port_present.csr); printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n", qsd->qsd_port_active.csr); printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n", qsd->qsd_fault_ena.csr); printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n", qsd->qsd_cpu_int_ena.csr); printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n", qsd->qsd_mem_config.csr); printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n", qsd->qsd_err_sum.csr); printk(KERN_ERR "\n"); } static void __init wildfire_dump_iop_regs(int qbbno) { wildfire_iop *iop = WILDFIRE_iop(qbbno); int i; printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop); printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr); printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr); printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n", iop->iop_switch_credits.csr); printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n", iop->iop_hose_credits.csr); for (i = 0; i < 4; i++) printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n", i, iop->iop_hose[i].init.csr); for (i = 0; i < 4; i++) printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n", i, iop->iop_dev_int[i].target.csr); printk(KERN_ERR "\n"); } static void __init wildfire_dump_gp_regs(int qbbno) { wildfire_gp *gp = WILDFIRE_gp(qbbno); int i; printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp); for (i = 0; i < 4; i++) printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n", i, gp->gpa_qbb_map[i].csr); printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n", gp->gpa_mem_pop_map.csr); printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr); printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr); printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr); printk(KERN_ERR " 
GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr); printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr); printk(KERN_ERR "\n"); } #endif /* DUMP_REGS */ #if DEBUG_DUMP_CONFIG static void __init wildfire_dump_hardware_config(void) { int i; printk(KERN_ERR "Probed Hardware Configuration\n"); printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask); printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask); printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask); printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask); printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask); printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask); printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask); printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask); printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask); printk(" hard_qbb_map: "); for (i = 0; i < WILDFIRE_MAX_QBB; i++) if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY) printk("--- "); else printk("%3d ", wildfire_hard_qbb_map[i]); printk("\n"); printk(" soft_qbb_map: "); for (i = 0; i < WILDFIRE_MAX_QBB; i++) if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY) printk("--- "); else printk("%3d ", wildfire_soft_qbb_map[i]); printk("\n"); } #endif /* DUMP_CONFIG */
gpl-2.0
cile381/android_kernel_m7_gpe
drivers/misc/ibmasm/remote.c
14836
10187
/* * IBM ASM Service Processor Device Driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2004 * * Authors: Max Asböck <amax@us.ibm.com> * Vernon Mauery <vernux@us.ibm.com> * */ /* Remote mouse and keyboard event handling functions */ #include <linux/pci.h> #include "ibmasm.h" #include "remote.h" #define MOUSE_X_MAX 1600 #define MOUSE_Y_MAX 1200 static const unsigned short xlate_high[XLATE_SIZE] = { [KEY_SYM_ENTER & 0xff] = KEY_ENTER, [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH, [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK, [KEY_SYM_KPMINUS & 0xff] = KEY_KPMINUS, [KEY_SYM_KPDOT & 0xff] = KEY_KPDOT, [KEY_SYM_KPPLUS & 0xff] = KEY_KPPLUS, [KEY_SYM_KP0 & 0xff] = KEY_KP0, [KEY_SYM_KP1 & 0xff] = KEY_KP1, [KEY_SYM_KP2 & 0xff] = KEY_KP2, [KEY_SYM_KPDOWN & 0xff] = KEY_KP2, [KEY_SYM_KP3 & 0xff] = KEY_KP3, [KEY_SYM_KP4 & 0xff] = KEY_KP4, [KEY_SYM_KPLEFT & 0xff] = KEY_KP4, [KEY_SYM_KP5 & 0xff] = KEY_KP5, [KEY_SYM_KP6 & 0xff] = KEY_KP6, [KEY_SYM_KPRIGHT & 0xff] = KEY_KP6, [KEY_SYM_KP7 & 0xff] = KEY_KP7, [KEY_SYM_KP8 & 0xff] = KEY_KP8, [KEY_SYM_KPUP & 0xff] = KEY_KP8, [KEY_SYM_KP9 & 0xff] = KEY_KP9, [KEY_SYM_BK_SPC & 0xff] = KEY_BACKSPACE, [KEY_SYM_TAB & 0xff] = KEY_TAB, [KEY_SYM_CTRL & 0xff] = KEY_LEFTCTRL, [KEY_SYM_ALT & 0xff] = KEY_LEFTALT, [KEY_SYM_INSERT & 0xff] = KEY_INSERT, [KEY_SYM_DELETE & 0xff] = 
KEY_DELETE, [KEY_SYM_SHIFT & 0xff] = KEY_LEFTSHIFT, [KEY_SYM_UARROW & 0xff] = KEY_UP, [KEY_SYM_DARROW & 0xff] = KEY_DOWN, [KEY_SYM_LARROW & 0xff] = KEY_LEFT, [KEY_SYM_RARROW & 0xff] = KEY_RIGHT, [KEY_SYM_ESCAPE & 0xff] = KEY_ESC, [KEY_SYM_PAGEUP & 0xff] = KEY_PAGEUP, [KEY_SYM_PAGEDOWN & 0xff] = KEY_PAGEDOWN, [KEY_SYM_HOME & 0xff] = KEY_HOME, [KEY_SYM_END & 0xff] = KEY_END, [KEY_SYM_F1 & 0xff] = KEY_F1, [KEY_SYM_F2 & 0xff] = KEY_F2, [KEY_SYM_F3 & 0xff] = KEY_F3, [KEY_SYM_F4 & 0xff] = KEY_F4, [KEY_SYM_F5 & 0xff] = KEY_F5, [KEY_SYM_F6 & 0xff] = KEY_F6, [KEY_SYM_F7 & 0xff] = KEY_F7, [KEY_SYM_F8 & 0xff] = KEY_F8, [KEY_SYM_F9 & 0xff] = KEY_F9, [KEY_SYM_F10 & 0xff] = KEY_F10, [KEY_SYM_F11 & 0xff] = KEY_F11, [KEY_SYM_F12 & 0xff] = KEY_F12, [KEY_SYM_CAP_LOCK & 0xff] = KEY_CAPSLOCK, [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK, [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK, }; static const unsigned short xlate[XLATE_SIZE] = { [NO_KEYCODE] = KEY_RESERVED, [KEY_SYM_SPACE] = KEY_SPACE, [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE, [KEY_SYM_ONE] = KEY_1, [KEY_SYM_BANG] = KEY_1, [KEY_SYM_TWO] = KEY_2, [KEY_SYM_AT] = KEY_2, [KEY_SYM_THREE] = KEY_3, [KEY_SYM_POUND] = KEY_3, [KEY_SYM_FOUR] = KEY_4, [KEY_SYM_DOLLAR] = KEY_4, [KEY_SYM_FIVE] = KEY_5, [KEY_SYM_PERCENT] = KEY_5, [KEY_SYM_SIX] = KEY_6, [KEY_SYM_CARAT] = KEY_6, [KEY_SYM_SEVEN] = KEY_7, [KEY_SYM_AMPER] = KEY_7, [KEY_SYM_EIGHT] = KEY_8, [KEY_SYM_STAR] = KEY_8, [KEY_SYM_NINE] = KEY_9, [KEY_SYM_LPAREN] = KEY_9, [KEY_SYM_ZERO] = KEY_0, [KEY_SYM_RPAREN] = KEY_0, [KEY_SYM_MINUS] = KEY_MINUS, [KEY_SYM_USCORE] = KEY_MINUS, [KEY_SYM_EQUAL] = KEY_EQUAL, [KEY_SYM_PLUS] = KEY_EQUAL, [KEY_SYM_LBRKT] = KEY_LEFTBRACE, [KEY_SYM_LCURLY] = KEY_LEFTBRACE, [KEY_SYM_RBRKT] = KEY_RIGHTBRACE, [KEY_SYM_RCURLY] = KEY_RIGHTBRACE, [KEY_SYM_SLASH] = KEY_BACKSLASH, [KEY_SYM_PIPE] = KEY_BACKSLASH, [KEY_SYM_TIC] = KEY_APOSTROPHE, [KEY_SYM_QUOTE] = KEY_APOSTROPHE, [KEY_SYM_SEMIC] = KEY_SEMICOLON, [KEY_SYM_COLON] = KEY_SEMICOLON, [KEY_SYM_COMMA] = 
KEY_COMMA, [KEY_SYM_LT] = KEY_COMMA, [KEY_SYM_PERIOD] = KEY_DOT, [KEY_SYM_GT] = KEY_DOT, [KEY_SYM_BSLASH] = KEY_SLASH, [KEY_SYM_QMARK] = KEY_SLASH, [KEY_SYM_A] = KEY_A, [KEY_SYM_a] = KEY_A, [KEY_SYM_B] = KEY_B, [KEY_SYM_b] = KEY_B, [KEY_SYM_C] = KEY_C, [KEY_SYM_c] = KEY_C, [KEY_SYM_D] = KEY_D, [KEY_SYM_d] = KEY_D, [KEY_SYM_E] = KEY_E, [KEY_SYM_e] = KEY_E, [KEY_SYM_F] = KEY_F, [KEY_SYM_f] = KEY_F, [KEY_SYM_G] = KEY_G, [KEY_SYM_g] = KEY_G, [KEY_SYM_H] = KEY_H, [KEY_SYM_h] = KEY_H, [KEY_SYM_I] = KEY_I, [KEY_SYM_i] = KEY_I, [KEY_SYM_J] = KEY_J, [KEY_SYM_j] = KEY_J, [KEY_SYM_K] = KEY_K, [KEY_SYM_k] = KEY_K, [KEY_SYM_L] = KEY_L, [KEY_SYM_l] = KEY_L, [KEY_SYM_M] = KEY_M, [KEY_SYM_m] = KEY_M, [KEY_SYM_N] = KEY_N, [KEY_SYM_n] = KEY_N, [KEY_SYM_O] = KEY_O, [KEY_SYM_o] = KEY_O, [KEY_SYM_P] = KEY_P, [KEY_SYM_p] = KEY_P, [KEY_SYM_Q] = KEY_Q, [KEY_SYM_q] = KEY_Q, [KEY_SYM_R] = KEY_R, [KEY_SYM_r] = KEY_R, [KEY_SYM_S] = KEY_S, [KEY_SYM_s] = KEY_S, [KEY_SYM_T] = KEY_T, [KEY_SYM_t] = KEY_T, [KEY_SYM_U] = KEY_U, [KEY_SYM_u] = KEY_U, [KEY_SYM_V] = KEY_V, [KEY_SYM_v] = KEY_V, [KEY_SYM_W] = KEY_W, [KEY_SYM_w] = KEY_W, [KEY_SYM_X] = KEY_X, [KEY_SYM_x] = KEY_X, [KEY_SYM_Y] = KEY_Y, [KEY_SYM_y] = KEY_Y, [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z, }; static void print_input(struct remote_input *input) { if (input->type == INPUT_TYPE_MOUSE) { unsigned char buttons = input->mouse_buttons; dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n", input->data.mouse.x, input->data.mouse.y, (buttons) ? " -- buttons:" : "", (buttons & REMOTE_BUTTON_LEFT) ? "left " : "", (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "", (buttons & REMOTE_BUTTON_RIGHT) ? 
"right" : "" );
	} else {
		dbg("remote keypress (code, flag, down):"
			"%d (0x%x) [0x%x] [0x%x]\n",
			input->data.keyboard.key_code,
			input->data.keyboard.key_code,
			input->data.keyboard.key_flag,
			input->data.keyboard.key_down );
	}
}

/* Report mouse position and button state to the input layer. */
static void send_mouse_event(struct input_dev *dev, struct remote_input *input)
{
	unsigned char buttons = input->mouse_buttons;

	input_report_abs(dev, ABS_X, input->data.mouse.x);
	input_report_abs(dev, ABS_Y, input->data.mouse.y);
	input_report_key(dev, BTN_LEFT, buttons & REMOTE_BUTTON_LEFT);
	input_report_key(dev, BTN_MIDDLE, buttons & REMOTE_BUTTON_MIDDLE);
	input_report_key(dev, BTN_RIGHT, buttons & REMOTE_BUTTON_RIGHT);
	input_sync(dev);
}

/*
 * Translate a remote key code to a Linux keycode via the xlate tables
 * (high codes use xlate_high, indexed by the low byte) and report it.
 */
static void send_keyboard_event(struct input_dev *dev,
		struct remote_input *input)
{
	unsigned int key;
	unsigned short code = input->data.keyboard.key_code;

	if (code & 0xff00)
		key = xlate_high[code & 0xff];
	else
		key = xlate[code];
	input_report_key(dev, key, input->data.keyboard.key_down);
	input_sync(dev);
}

/*
 * Drain the service processor's event queue, dispatching each entry to
 * the mouse or keyboard device.  An unknown event type stops the drain.
 */
void ibmasm_handle_mouse_interrupt(struct service_processor *sp)
{
	unsigned long reader;
	unsigned long writer;
	struct remote_input input;

	reader = get_queue_reader(sp);
	writer = get_queue_writer(sp);

	while (reader != writer) {
		/* queue entries live in device memory, hence memcpy_fromio */
		memcpy_fromio(&input, get_queue_entry(sp, reader),
				sizeof(struct remote_input));

		print_input(&input);
		if (input.type == INPUT_TYPE_MOUSE) {
			send_mouse_event(sp->remote.mouse_dev, &input);
		} else if (input.type == INPUT_TYPE_KEYBOARD) {
			send_keyboard_event(sp->remote.keybd_dev, &input);
		} else
			break;

		reader = advance_queue_reader(sp, reader);
		writer = get_queue_writer(sp);
	}
}

/*
 * Allocate and register the remote mouse and keyboard input devices,
 * then enable mouse interrupts.  Returns 0 or a negative errno.
 */
int ibmasm_init_remote_input_dev(struct service_processor *sp)
{
	/* set up the mouse input device */
	struct input_dev *mouse_dev, *keybd_dev;
	struct pci_dev *pdev = to_pci_dev(sp->dev);
	int error = -ENOMEM;
	int i;

	sp->remote.mouse_dev = mouse_dev = input_allocate_device();
	sp->remote.keybd_dev = keybd_dev = input_allocate_device();

	if (!mouse_dev || !keybd_dev)
		goto err_free_devices;

	mouse_dev->id.bustype =
BUS_PCI;
	mouse_dev->id.vendor = pdev->vendor;
	mouse_dev->id.product = pdev->device;
	mouse_dev->id.version = 1;
	mouse_dev->dev.parent = sp->dev;
	/* absolute-coordinate pointer with three buttons */
	mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
		BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
	set_bit(BTN_TOUCH, mouse_dev->keybit);
	mouse_dev->name = "ibmasm RSA I remote mouse";
	input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
	input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);

	keybd_dev->id.bustype = BUS_PCI;
	keybd_dev->id.vendor = pdev->vendor;
	keybd_dev->id.product = pdev->device;
	keybd_dev->id.version = 2;
	keybd_dev->dev.parent = sp->dev;
	keybd_dev->evbit[0] = BIT_MASK(EV_KEY);
	keybd_dev->name = "ibmasm RSA I remote keyboard";

	/* advertise every keycode the translation tables can produce */
	for (i = 0; i < XLATE_SIZE; i++) {
		if (xlate_high[i])
			set_bit(xlate_high[i], keybd_dev->keybit);
		if (xlate[i])
			set_bit(xlate[i], keybd_dev->keybit);
	}

	error = input_register_device(mouse_dev);
	if (error)
		goto err_free_devices;
	error = input_register_device(keybd_dev);
	if (error)
		goto err_unregister_mouse_dev;

	enable_mouse_interrupts(sp);

	printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number);

	return 0;

 err_unregister_mouse_dev:
	input_unregister_device(mouse_dev);
	mouse_dev = NULL; /* so we don't try to free it again below */
 err_free_devices:
	/* input_free_device(NULL) is a no-op, so both paths are safe */
	input_free_device(mouse_dev);
	input_free_device(keybd_dev);

	return error;
}

/* Tear down both remote input devices and quiesce the hardware. */
void ibmasm_free_remote_input_dev(struct service_processor *sp)
{
	disable_mouse_interrupts(sp);
	input_unregister_device(sp->remote.mouse_dev);
	input_unregister_device(sp->remote.keybd_dev);
}
gpl-2.0
Zoldyck07/Evolution
sound/core/oss/rate.c
14836
9929
/* * Rate conversion Plug-In * Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz> * * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Library General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/pcm.h> #include "pcm_plugin.h" #define SHIFT 11 #define BITS (1<<SHIFT) #define R_MASK (BITS-1) /* * Basic rate conversion plugin */ struct rate_channel { signed short last_S1; signed short last_S2; }; typedef void (*rate_f)(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, int src_frames, int dst_frames); struct rate_priv { unsigned int pitch; unsigned int pos; rate_f func; snd_pcm_sframes_t old_src_frames, old_dst_frames; struct rate_channel channels[0]; }; static void rate_init(struct snd_pcm_plugin *plugin) { unsigned int channel; struct rate_priv *data = (struct rate_priv *)plugin->extra_data; data->pos = 0; for (channel = 0; channel < plugin->src_format.channels; channel++) { data->channels[channel].last_S1 = 0; data->channels[channel].last_S2 = 0; } } static void resample_expand(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, int src_frames, int dst_frames) { unsigned int pos = 0; signed int val; signed short S1, S2; signed short *src, *dst; 
unsigned int channel; int src_step, dst_step; int src_frames1, dst_frames1; struct rate_priv *data = (struct rate_priv *)plugin->extra_data; struct rate_channel *rchannels = data->channels; for (channel = 0; channel < plugin->src_format.channels; channel++) { pos = data->pos; S1 = rchannels->last_S1; S2 = rchannels->last_S2; if (!src_channels[channel].enabled) { if (dst_channels[channel].wanted) snd_pcm_area_silence(&dst_channels[channel].area, 0, dst_frames, plugin->dst_format.format); dst_channels[channel].enabled = 0; continue; } dst_channels[channel].enabled = 1; src = (signed short *)src_channels[channel].area.addr + src_channels[channel].area.first / 8 / 2; dst = (signed short *)dst_channels[channel].area.addr + dst_channels[channel].area.first / 8 / 2; src_step = src_channels[channel].area.step / 8 / 2; dst_step = dst_channels[channel].area.step / 8 / 2; src_frames1 = src_frames; dst_frames1 = dst_frames; while (dst_frames1-- > 0) { if (pos & ~R_MASK) { pos &= R_MASK; S1 = S2; if (src_frames1-- > 0) { S2 = *src; src += src_step; } } val = S1 + ((S2 - S1) * (signed int)pos) / BITS; if (val < -32768) val = -32768; else if (val > 32767) val = 32767; *dst = val; dst += dst_step; pos += data->pitch; } rchannels->last_S1 = S1; rchannels->last_S2 = S2; rchannels++; } data->pos = pos; } static void resample_shrink(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, int src_frames, int dst_frames) { unsigned int pos = 0; signed int val; signed short S1, S2; signed short *src, *dst; unsigned int channel; int src_step, dst_step; int src_frames1, dst_frames1; struct rate_priv *data = (struct rate_priv *)plugin->extra_data; struct rate_channel *rchannels = data->channels; for (channel = 0; channel < plugin->src_format.channels; ++channel) { pos = data->pos; S1 = rchannels->last_S1; S2 = rchannels->last_S2; if (!src_channels[channel].enabled) { if (dst_channels[channel].wanted) 
snd_pcm_area_silence(&dst_channels[channel].area, 0, dst_frames, plugin->dst_format.format); dst_channels[channel].enabled = 0; continue; } dst_channels[channel].enabled = 1; src = (signed short *)src_channels[channel].area.addr + src_channels[channel].area.first / 8 / 2; dst = (signed short *)dst_channels[channel].area.addr + dst_channels[channel].area.first / 8 / 2; src_step = src_channels[channel].area.step / 8 / 2; dst_step = dst_channels[channel].area.step / 8 / 2; src_frames1 = src_frames; dst_frames1 = dst_frames; while (dst_frames1 > 0) { S1 = S2; if (src_frames1-- > 0) { S2 = *src; src += src_step; } if (pos & ~R_MASK) { pos &= R_MASK; val = S1 + ((S2 - S1) * (signed int)pos) / BITS; if (val < -32768) val = -32768; else if (val > 32767) val = 32767; *dst = val; dst += dst_step; dst_frames1--; } pos += data->pitch; } rchannels->last_S1 = S1; rchannels->last_S2 = S2; rchannels++; } data->pos = pos; } static snd_pcm_sframes_t rate_src_frames(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t frames) { struct rate_priv *data; snd_pcm_sframes_t res; if (snd_BUG_ON(!plugin)) return -ENXIO; if (frames == 0) return 0; data = (struct rate_priv *)plugin->extra_data; if (plugin->src_format.rate < plugin->dst_format.rate) { res = (((frames * data->pitch) + (BITS/2)) >> SHIFT); } else { res = (((frames << SHIFT) + (data->pitch / 2)) / data->pitch); } if (data->old_src_frames > 0) { snd_pcm_sframes_t frames1 = frames, res1 = data->old_dst_frames; while (data->old_src_frames < frames1) { frames1 >>= 1; res1 <<= 1; } while (data->old_src_frames > frames1) { frames1 <<= 1; res1 >>= 1; } if (data->old_src_frames == frames1) return res1; } data->old_src_frames = frames; data->old_dst_frames = res; return res; } static snd_pcm_sframes_t rate_dst_frames(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t frames) { struct rate_priv *data; snd_pcm_sframes_t res; if (snd_BUG_ON(!plugin)) return -ENXIO; if (frames == 0) return 0; data = (struct rate_priv *)plugin->extra_data; if 
(plugin->src_format.rate < plugin->dst_format.rate) { res = (((frames << SHIFT) + (data->pitch / 2)) / data->pitch); } else { res = (((frames * data->pitch) + (BITS/2)) >> SHIFT); } if (data->old_dst_frames > 0) { snd_pcm_sframes_t frames1 = frames, res1 = data->old_src_frames; while (data->old_dst_frames < frames1) { frames1 >>= 1; res1 <<= 1; } while (data->old_dst_frames > frames1) { frames1 <<= 1; res1 >>= 1; } if (data->old_dst_frames == frames1) return res1; } data->old_dst_frames = frames; data->old_src_frames = res; return res; } static snd_pcm_sframes_t rate_transfer(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, snd_pcm_uframes_t frames) { snd_pcm_uframes_t dst_frames; struct rate_priv *data; if (snd_BUG_ON(!plugin || !src_channels || !dst_channels)) return -ENXIO; if (frames == 0) return 0; #ifdef CONFIG_SND_DEBUG { unsigned int channel; for (channel = 0; channel < plugin->src_format.channels; channel++) { if (snd_BUG_ON(src_channels[channel].area.first % 8 || src_channels[channel].area.step % 8)) return -ENXIO; if (snd_BUG_ON(dst_channels[channel].area.first % 8 || dst_channels[channel].area.step % 8)) return -ENXIO; } } #endif dst_frames = rate_dst_frames(plugin, frames); if (dst_frames > dst_channels[0].frames) dst_frames = dst_channels[0].frames; data = (struct rate_priv *)plugin->extra_data; data->func(plugin, src_channels, dst_channels, frames, dst_frames); return dst_frames; } static int rate_action(struct snd_pcm_plugin *plugin, enum snd_pcm_plugin_action action, unsigned long udata) { if (snd_BUG_ON(!plugin)) return -ENXIO; switch (action) { case INIT: case PREPARE: rate_init(plugin); break; default: break; } return 0; /* silenty ignore other actions */ } int snd_pcm_plugin_build_rate(struct snd_pcm_substream *plug, struct snd_pcm_plugin_format *src_format, struct snd_pcm_plugin_format *dst_format, struct snd_pcm_plugin **r_plugin) { int err; struct rate_priv *data; 
struct snd_pcm_plugin *plugin;

	if (snd_BUG_ON(!r_plugin))
		return -ENXIO;
	*r_plugin = NULL;

	/* this plugin only converts rate between two S16 streams with
	 * identical (non-zero) channel counts and differing rates */
	if (snd_BUG_ON(src_format->channels != dst_format->channels))
		return -ENXIO;
	if (snd_BUG_ON(src_format->channels <= 0))
		return -ENXIO;
	if (snd_BUG_ON(src_format->format != SNDRV_PCM_FORMAT_S16))
		return -ENXIO;
	if (snd_BUG_ON(dst_format->format != SNDRV_PCM_FORMAT_S16))
		return -ENXIO;
	if (snd_BUG_ON(src_format->rate == dst_format->rate))
		return -ENXIO;

	/* extra_data holds rate_priv plus one rate_channel per channel */
	err = snd_pcm_plugin_build(plug, "rate conversion",
				   src_format, dst_format,
				   sizeof(struct rate_priv) +
				   src_format->channels * sizeof(struct rate_channel),
				   &plugin);
	if (err < 0)
		return err;
	data = (struct rate_priv *)plugin->extra_data;
	/* pitch is the rounded fixed-point ratio of the two rates;
	 * direction picks the expand or shrink resampler */
	if (src_format->rate < dst_format->rate) {
		data->pitch = ((src_format->rate << SHIFT) + (dst_format->rate >> 1)) / dst_format->rate;
		data->func = resample_expand;
	} else {
		data->pitch = ((dst_format->rate << SHIFT) + (src_format->rate >> 1)) / src_format->rate;
		data->func = resample_shrink;
	}
	data->pos = 0;
	rate_init(plugin);
	data->old_src_frames = data->old_dst_frames = 0;
	plugin->transfer = rate_transfer;
	plugin->src_frames = rate_src_frames;
	plugin->dst_frames = rate_dst_frames;
	plugin->action = rate_action;
	*r_plugin = plugin;
	return 0;
}
gpl-2.0
theophile/SM-N920R7_MM_Kernel
drivers/gpu/arm/midgard_wk04/platform/gpu_control_exynos5422.c
245
11767
/* drivers/gpu/t6xx/kbase/src/platform/gpu_control_exynos5422.c * * Copyright 2011 by S.LSI. Samsung Electronics Inc. * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea * * Samsung SoC Mali-T604 DVFS driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software FoundatIon. */ /** * @file gpu_control_exynos5422.c * DVFS */ #include <mali_kbase.h> #include <linux/regulator/driver.h> #include <mach/asv-exynos.h> #include <mach/pm_domains.h> #include "mali_kbase_platform.h" #include "gpu_dvfs_handler.h" #include "gpu_control.h" extern struct kbase_device *pkbdev; #ifdef CONFIG_PM_RUNTIME struct exynos_pm_domain *gpu_get_pm_domain(kbase_device *kbdev) { struct platform_device *pdev = NULL; struct device_node *np = NULL; struct exynos_pm_domain *pd_temp, *pd = NULL; for_each_compatible_node(np, NULL, "samsung,exynos-pd") { if (!of_device_is_available(np)) continue; pdev = of_find_device_by_node(np); pd_temp = platform_get_drvdata(pdev); if (!strcmp("pd-g3d", pd_temp->genpd.name)) { pd = pd_temp; break; } } return pd; } #endif int get_cpu_clock_speed(u32 *cpu_clock) { struct clk *cpu_clk; u32 freq = 0; cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return -1; freq = clk_get_rate(cpu_clk); *cpu_clock = (freq/MHZ); return 0; } int gpu_is_power_on(void) { return ((__raw_readl(EXYNOS5422_G3D_STATUS) & EXYNOS_INT_LOCAL_PWR_EN) == EXYNOS_INT_LOCAL_PWR_EN) ? 
1 : 0; } int gpu_power_init(kbase_device *kbdev) { struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; if (!platform) return -ENODEV; GPU_LOG(DVFS_INFO, "g3d power initialized\n"); return 0; } static int gpu_update_clock(struct exynos_context *platform) { if (!platform->clk_g3d_ip) { GPU_LOG(DVFS_ERROR, "clk_g3d_ip is not initialized\n"); return -1; } platform->cur_clock = clk_get_rate(platform->clk_g3d_ip)/MHZ; return 0; } int gpu_is_clock_on(struct exynos_context *platform) { if (!platform) return -ENODEV; return __clk_is_enabled(platform->clk_g3d_ip); } int gpu_clock_on(struct exynos_context *platform) { if (!platform) return -ENODEV; if (!gpu_is_power_on()) { GPU_LOG(DVFS_WARNING, "can't set clock on in g3d power off status\n"); return -1; } if (platform->clk_g3d_status == 1) return 0; if (platform->clk_g3d_ip) { (void) clk_prepare_enable(platform->clk_g3d_ip); KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_CLOCK_ON, NULL, NULL, 0u, 0u); } platform->clk_g3d_status = 1; return 0; } int gpu_clock_off(struct exynos_context *platform) { if (!platform) return -ENODEV; if (platform->clk_g3d_status == 0) return 0; if (platform->clk_g3d_ip) { (void)clk_disable_unprepare(platform->clk_g3d_ip); KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_CLOCK_OFF, NULL, NULL, 0u, 0u); } platform->clk_g3d_status = 0; return 0; } unsigned long get_dpll_freq(int curr, int targ) { unsigned long dpll_clk; int divider; switch(targ) { case 480: case 420: case 350: divider = 2; break; case 266: divider = 2 + (targ < curr ? 0:1); break; case 177: divider = 3 + (targ < curr ? 
0:1); break; case 100: divider = 4; break; default: divider = 1; break; } dpll_clk = curr / divider + 5; return (dpll_clk*1000000); } int gpu_register_dump(void) { #ifdef CONFIG_MALI_EXYNOS_TRACE if (gpu_is_power_on()) { /* G3D PMU */ KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_REGISTER_DUMP, NULL, NULL, 0x10044084, __raw_readl(EXYNOS5422_G3D_STATUS)); /* G3D SRC */ KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_REGISTER_DUMP, NULL, NULL, 0x10020208, __raw_readl(EXYNOS5_CLK_SRC_TOP2)); KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_REGISTER_DUMP, NULL, NULL, 0x10020214, __raw_readl(EXYNOS5_CLK_SRC_TOP5)); KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_REGISTER_DUMP, NULL, NULL, 0x10020288, __raw_readl(EXYNOS5_CLK_SRC_TOP12)); /* G3D DIV */ KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_REGISTER_DUMP, NULL, NULL, 0x10020608, __raw_readl(EXYNOS5_CLK_DIV_STAT_TOP2)); /* G3D MUX */ KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_REGISTER_DUMP, NULL, NULL, 0x10020414, __raw_readl(EXYNOS5_CLK_MUX_STAT_TOP5)); } #endif /* CONFIG_MALI_EXYNOS_TRACE */ return 0; } int gpu_set_clock(struct exynos_context *platform, int freq) { long g3d_rate_prev = -1; unsigned long g3d_rate = freq * MHZ; unsigned long tmp = 0; int ret; if (platform->clk_g3d_ip == 0) return -1; #ifdef CONFIG_PM_RUNTIME if (platform->exynos_pm_domain) mutex_lock(&platform->exynos_pm_domain->access_lock); #endif /* CONFIG_PM_RUNTIME */ if (!gpu_is_power_on()) { ret = -1; GPU_LOG(DVFS_WARNING, "gpu_set_clk_vol in the G3D power-off state!\n"); goto err; } if (!gpu_is_clock_on(platform)) { ret = -1; GPU_LOG(DVFS_WARNING, "gpu_set_clk_vol in the G3D clock-off state!\n"); goto err; } g3d_rate_prev = clk_get_rate(platform->fout_vpll)/MHZ; /* if changed the VPLL rate, set rate for VPLL and wait for lock time */ if (freq != g3d_rate_prev) { /*for stable clock input.*/ ret = clk_set_rate(platform->dout_aclk_g3d, get_dpll_freq(clk_get_rate(platform->clk_g3d_ip)/1000000, freq)); if (ret < 0) { GPU_LOG(DVFS_ERROR, "failed to clk_set_rate [dout_aclk_g3d]\n"); goto err; } /*change here for 
future stable clock changing*/ ret = clk_set_parent(platform->mout_aclk_g3d, platform->mout_dpll_ctrl); if (ret < 0) { GPU_LOG(DVFS_ERROR, "failed to clk_set_parent [mout_aclk_g3d]\n"); goto err; } /*change g3d pll*/ ret = clk_set_rate(platform->fout_vpll, g3d_rate); if (ret < 0) { GPU_LOG(DVFS_ERROR, "failed to clk_set_rate [fout_vpll]\n"); goto err; } /*restore parent*/ ret = clk_set_parent(platform->mout_aclk_g3d, platform->mout_vpll_ctrl); if (ret < 0) { GPU_LOG(DVFS_ERROR, "failed to clk_set_parent [mout_aclk_g3d]\n"); goto err; } g3d_rate_prev = g3d_rate; } ret = clk_set_rate(platform->dout_aclk_g3d, g3d_rate); if (ret < 0) { GPU_LOG(DVFS_ERROR, "failed to clk_set_rate [dout_aclk_g3d]\n"); goto err; } /* Waiting for clock is stable */ do { tmp = __raw_readl(EXYNOS5_CLK_DIV_STAT_TOP2); } while (tmp & 0x10000); gpu_update_clock(platform); #ifdef CONFIG_MALI_EXYNOS_TRACE KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_CLOCK_VALUE, NULL, NULL, 0u, g3d_rate/MHZ); #endif /* CONFIG_MALI_EXYNOS_TRACE */ GPU_LOG(DVFS_DEBUG, "[G3D] clock set: %ld\n", g3d_rate / MHZ); GPU_LOG(DVFS_DEBUG, "[G3D] clock get: %d\n", platform->cur_clock); err: #ifdef CONFIG_PM_RUNTIME if (platform->exynos_pm_domain) mutex_unlock(&platform->exynos_pm_domain->access_lock); #endif /* CONFIG_PM_RUNTIME */ return ret; } static int gpu_get_clock(kbase_device *kbdev) { struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; if (!platform) return -ENODEV; KBASE_DEBUG_ASSERT(kbdev != NULL); /* * EXYNOS5422 3D clock description * normal usage: mux(vpll) -> divider -> mux_sw -> mux_user -> aclk_g3d * on clock changing: mux(dpll) -> divider(3) -> mux_sw -> mux_user -> aclk_g3d */ platform->fout_vpll = clk_get(NULL, "fout_vpll"); if (IS_ERR(platform->fout_vpll)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [fout_vpll]\n"); return -1; } platform->mout_vpll_ctrl = clk_get(kbdev->osdev.dev, "mout_vpll_ctrl"); /* same as sclk_vpll */ if (IS_ERR(platform->mout_vpll_ctrl)) { GPU_LOG(DVFS_ERROR, 
"failed to clk_get [mout_vpll_ctrl]\n"); return -1; } platform->mout_dpll_ctrl = clk_get(kbdev->osdev.dev, "mout_dpll_ctrl"); /* same as sclk_dpll */ if (IS_ERR(platform->mout_dpll_ctrl)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [mout_dpll_ctrl]\n"); return -1; } platform->mout_aclk_g3d = clk_get(kbdev->osdev.dev, "mout_aclk_g3d"); /* set parents v or d pll */ if (IS_ERR(platform->mout_aclk_g3d)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [mout_aclk_g3d]\n"); return -1; } platform->dout_aclk_g3d = clk_get(kbdev->osdev.dev, "dout_aclk_g3d"); /* divider usage */ if (IS_ERR(platform->dout_aclk_g3d)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [dout_aclk_g3d]\n"); return -1; } platform->mout_aclk_g3d_sw = clk_get(kbdev->osdev.dev, "mout_aclk_g3d_sw"); if (IS_ERR(platform->mout_aclk_g3d_sw)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [mout_aclk_g3d_sw]\n"); return -1; } platform->mout_aclk_g3d_user = clk_get(kbdev->osdev.dev, "mout_aclk_g3d_user"); if (IS_ERR(platform->mout_aclk_g3d_user)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [mout_aclk_g3d_user]\n"); return -1; } platform->clk_g3d_ip = clk_get(kbdev->osdev.dev, "clk_g3d_ip"); clk_prepare_enable(platform->clk_g3d_ip); if (IS_ERR(platform->clk_g3d_ip)) { GPU_LOG(DVFS_ERROR, "failed to clk_get [clk_g3d_ip]\n"); return -1; } else { platform->clk_g3d_status = 1; } return 0; } int gpu_clock_init(kbase_device *kbdev) { int ret; struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; if (!platform) return -ENODEV; KBASE_DEBUG_ASSERT(kbdev != NULL); ret = gpu_get_clock(kbdev); if (ret < 0) return -1; GPU_LOG(DVFS_INFO, "g3d clock initialized\n"); return 0; } static int gpu_update_voltage(struct exynos_context *platform) { #ifdef CONFIG_REGULATOR if (!platform->g3d_regulator) { GPU_LOG(DVFS_ERROR, "g3d_regulator is not initialized\n"); return -1; } platform->cur_voltage = regulator_get_voltage(platform->g3d_regulator); #endif /* CONFIG_REGULATOR */ return 0; } int gpu_set_voltage(struct exynos_context 
*platform, int vol) { static int _vol = -1; if (_vol == vol) return 0; #ifdef CONFIG_REGULATOR if (!platform->g3d_regulator) { GPU_LOG(DVFS_ERROR, "g3d_regulator is not initialized\n"); return -1; } if (regulator_set_voltage(platform->g3d_regulator, vol, vol) != 0) { GPU_LOG(DVFS_ERROR, "failed to set voltage, voltage: %d\n", vol); return -1; } #endif /* CONFIG_REGULATOR */ _vol = vol; gpu_update_voltage(platform); #ifdef CONFIG_MALI_EXYNOS_TRACE KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_VOL_VALUE, NULL, NULL, 0u, vol); #endif /* CONFIG_MALI_EXYNOS_TRACE */ GPU_LOG(DVFS_DEBUG, "[G3D] voltage set:%d\n", vol); GPU_LOG(DVFS_DEBUG, "[G3D] voltage get:%d\n", platform->cur_voltage); return 0; } #ifdef CONFIG_REGULATOR int gpu_regulator_enable(struct exynos_context *platform) { if (!platform->g3d_regulator) { GPU_LOG(DVFS_ERROR, "g3d_regulator is not initialized\n"); return -1; } if (regulator_enable(platform->g3d_regulator) != 0) { GPU_LOG(DVFS_ERROR, "failed to enable g3d regulator\n"); return -1; } return 0; } int gpu_regulator_disable(struct exynos_context *platform) { if (!platform->g3d_regulator) { GPU_LOG(DVFS_ERROR, "g3d_regulator is not initialized\n"); return -1; } if (regulator_disable(platform->g3d_regulator) != 0) { GPU_LOG(DVFS_ERROR, "failed to disable g3d regulator\n"); return -1; } return 0; } int gpu_regulator_init(struct exynos_context *platform) { int gpu_voltage = 0; platform->g3d_regulator = regulator_get(NULL, "vdd_g3d"); if (IS_ERR(platform->g3d_regulator)) { GPU_LOG(DVFS_ERROR, "failed to get mali t6xx regulator, 0x%p\n", platform->g3d_regulator); platform->g3d_regulator = NULL; return -1; } if (gpu_regulator_enable(platform) != 0) { GPU_LOG(DVFS_ERROR, "failed to enable mali t6xx regulator\n"); platform->g3d_regulator = NULL; return -1; } gpu_voltage = get_match_volt(ID_G3D, MALI_DVFS_BL_CONFIG_FREQ*1000); if (gpu_voltage == 0) gpu_voltage = GPU_DEFAULT_VOLTAGE; if (gpu_set_voltage(platform, gpu_voltage) != 0) { GPU_LOG(DVFS_ERROR, "failed to set mali 
t6xx operating voltage [%d]\n", gpu_voltage); return -1; } GPU_LOG(DVFS_INFO, "g3d regulator initialized\n"); return 0; } #endif /* CONFIG_REGULATOR */
gpl-2.0
Happy-Ferret/Kernel-Experiments
fs/btrfs/ordered-data.c
245
32323
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" #include "extent_io.h" #include "disk-io.h" static struct kmem_cache *btrfs_ordered_extent_cache; static u64 entry_end(struct btrfs_ordered_extent *entry) { if (entry->file_offset + entry->len < entry->file_offset) return (u64)-1; return entry->file_offset + entry->len; } /* returns NULL if the insertion worked, or it returns the node it did find * in the tree */ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, struct rb_node *node) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct btrfs_ordered_extent *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); if (file_offset < entry->file_offset) p = &(*p)->rb_left; else if (file_offset >= entry_end(entry)) p = &(*p)->rb_right; else return parent; } rb_link_node(node, parent, p); rb_insert_color(node, root); return NULL; } static void ordered_data_tree_panic(struct inode *inode, int errno, u64 offset) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset " "%llu\n", offset); } /* * look for a given offset 
in the tree, and if it can't be found return the * first lesser offset */ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, struct rb_node **prev_ret) { struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *test; struct btrfs_ordered_extent *entry; struct btrfs_ordered_extent *prev_entry = NULL; while (n) { entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); prev = n; prev_entry = entry; if (file_offset < entry->file_offset) n = n->rb_left; else if (file_offset >= entry_end(entry)) n = n->rb_right; else return n; } if (!prev_ret) return NULL; while (prev && file_offset >= entry_end(prev_entry)) { test = rb_next(prev); if (!test) break; prev_entry = rb_entry(test, struct btrfs_ordered_extent, rb_node); if (file_offset < entry_end(prev_entry)) break; prev = test; } if (prev) prev_entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node); while (prev && file_offset < entry_end(prev_entry)) { test = rb_prev(prev); if (!test) break; prev_entry = rb_entry(test, struct btrfs_ordered_extent, rb_node); prev = test; } *prev_ret = prev; return NULL; } /* * helper to check if a given offset is inside a given entry */ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) { if (file_offset < entry->file_offset || entry->file_offset + entry->len <= file_offset) return 0; return 1; } static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, u64 len) { if (file_offset + len <= entry->file_offset || entry->file_offset + entry->len <= file_offset) return 0; return 1; } /* * look find the first ordered struct that has this offset, otherwise * the first one less than this offset */ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { struct rb_root *root = &tree->tree; struct rb_node *prev = NULL; struct rb_node *ret; struct btrfs_ordered_extent *entry; if (tree->last) { entry = rb_entry(tree->last, struct btrfs_ordered_extent, 
rb_node); if (offset_in_entry(entry, file_offset)) return tree->last; } ret = __tree_search(root, file_offset, &prev); if (!ret) ret = prev; if (ret) tree->last = ret; return ret; } /* allocate and add a new ordered_extent into the per-inode tree. * file_offset is the logical offset in the file * * start is the disk block number of an extent already reserved in the * extent allocation tree * * len is the length of the extent * * The tree is given a single reference on the ordered extent that was * inserted. */ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type, int dio, int compress_type) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry; tree = &BTRFS_I(inode)->ordered_tree; entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); if (!entry) return -ENOMEM; entry->file_offset = file_offset; entry->start = start; entry->len = len; if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) && !(type == BTRFS_ORDERED_NOCOW)) entry->csum_bytes_left = disk_len; entry->disk_len = disk_len; entry->bytes_left = len; entry->inode = igrab(inode); entry->compress_type = compress_type; entry->truncated_len = (u64)-1; if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); if (dio) set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); /* one ref for the tree */ atomic_set(&entry->refs, 1); init_waitqueue_head(&entry->wait); INIT_LIST_HEAD(&entry->list); INIT_LIST_HEAD(&entry->root_extent_list); INIT_LIST_HEAD(&entry->work_list); init_completion(&entry->completion); INIT_LIST_HEAD(&entry->log_list); trace_btrfs_ordered_extent_add(inode, entry); spin_lock_irq(&tree->lock); node = tree_insert(&tree->tree, file_offset, &entry->rb_node); if (node) ordered_data_tree_panic(inode, -EEXIST, file_offset); spin_unlock_irq(&tree->lock); spin_lock(&root->ordered_extent_lock); 
list_add_tail(&entry->root_extent_list, &root->ordered_extents); root->nr_ordered_extents++; if (root->nr_ordered_extents == 1) { spin_lock(&root->fs_info->ordered_root_lock); BUG_ON(!list_empty(&root->ordered_root)); list_add_tail(&root->ordered_root, &root->fs_info->ordered_roots); spin_unlock(&root->fs_info->ordered_root_lock); } spin_unlock(&root->ordered_extent_lock); return 0; } int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, disk_len, type, 0, BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, disk_len, type, 1, BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type, int compress_type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, disk_len, type, 0, compress_type); } /* * Add a struct btrfs_ordered_sum into the list of checksums to be inserted * when an ordered extent is finished. If the list covers more than one * ordered extent, it is split across multiples. */ void btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum) { struct btrfs_ordered_inode_tree *tree; tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irq(&tree->lock); list_add_tail(&sum->list, &entry->list); WARN_ON(entry->csum_bytes_left < sum->len); entry->csum_bytes_left -= sum->len; if (entry->csum_bytes_left == 0) wake_up(&entry->wait); spin_unlock_irq(&tree->lock); } /* * this is used to account for finished IO across a given range * of the file. The IO may span ordered extents. If * a given ordered_extent is completely done, 1 is returned, otherwise * 0. 
* * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used * to make sure this function only returns 1 once for a given ordered extent. * * file_offset is updated to one byte past the range that is recorded as * complete. This allows you to walk forward in the file. */ int btrfs_dec_test_first_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 *file_offset, u64 io_size, int uptodate) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; int ret; unsigned long flags; u64 dec_end; u64 dec_start; u64 to_dec; tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irqsave(&tree->lock, flags); node = tree_search(tree, *file_offset); if (!node) { ret = 1; goto out; } entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (!offset_in_entry(entry, *file_offset)) { ret = 1; goto out; } dec_start = max(*file_offset, entry->file_offset); dec_end = min(*file_offset + io_size, entry->file_offset + entry->len); *file_offset = dec_end; if (dec_start > dec_end) { btrfs_crit(BTRFS_I(inode)->root->fs_info, "bad ordering dec_start %llu end %llu", dec_start, dec_end); } to_dec = dec_end - dec_start; if (to_dec > entry->bytes_left) { btrfs_crit(BTRFS_I(inode)->root->fs_info, "bad ordered accounting left %llu size %llu", entry->bytes_left, to_dec); } entry->bytes_left -= to_dec; if (!uptodate) set_bit(BTRFS_ORDERED_IOERR, &entry->flags); if (entry->bytes_left == 0) ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); else ret = 1; out: if (!ret && cached && entry) { *cached = entry; atomic_inc(&entry->refs); } spin_unlock_irqrestore(&tree->lock, flags); return ret == 0; } /* * this is used to account for finished IO across a given range * of the file. The IO should not span ordered extents. If * a given ordered_extent is completely done, 1 is returned, otherwise * 0. 
* * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used * to make sure this function only returns 1 once for a given ordered extent. */ int btrfs_dec_test_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 file_offset, u64 io_size, int uptodate) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; unsigned long flags; int ret; tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irqsave(&tree->lock, flags); if (cached && *cached) { entry = *cached; goto have_entry; } node = tree_search(tree, file_offset); if (!node) { ret = 1; goto out; } entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); have_entry: if (!offset_in_entry(entry, file_offset)) { ret = 1; goto out; } if (io_size > entry->bytes_left) { btrfs_crit(BTRFS_I(inode)->root->fs_info, "bad ordered accounting left %llu size %llu", entry->bytes_left, io_size); } entry->bytes_left -= io_size; if (!uptodate) set_bit(BTRFS_ORDERED_IOERR, &entry->flags); if (entry->bytes_left == 0) ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); else ret = 1; out: if (!ret && cached && entry) { *cached = entry; atomic_inc(&entry->refs); } spin_unlock_irqrestore(&tree->lock, flags); return ret == 0; } /* Needs to either be called under a log transaction or the log_mutex */ void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode) { struct btrfs_ordered_inode_tree *tree; struct btrfs_ordered_extent *ordered; struct rb_node *n; int index = log->log_transid % 2; tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irq(&tree->lock); for (n = rb_first(&tree->tree); n; n = rb_next(n)) { ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); spin_lock(&log->log_extents_lock[index]); if (list_empty(&ordered->log_list)) { list_add_tail(&ordered->log_list, &log->logged_list[index]); atomic_inc(&ordered->refs); } spin_unlock(&log->log_extents_lock[index]); } spin_unlock_irq(&tree->lock); } void 
btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	/*
	 * Drain the logged list for this transid slot, waiting for each
	 * ordered extent's IO to finish.  The spinlock is dropped around the
	 * (possibly long) wait_event(); the entry was already unlinked under
	 * the lock, so nobody can requeue it behind our back.
	 */
	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		/* sleep until the endio path sets BTRFS_ORDERED_IO_DONE */
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		/* drop the ref taken when the extent was added to the list */
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * Drop the log-list references for a transid slot without waiting for IO.
 */
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		/* the put may free the extent; do it with the lock dropped */
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		/* the inode reference is released via a delayed iput */
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		/* free any checksum entries still attached to this extent */
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
*/ void btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry) { struct btrfs_ordered_inode_tree *tree; struct btrfs_root *root = BTRFS_I(inode)->root; struct rb_node *node; tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irq(&tree->lock); node = &entry->rb_node; rb_erase(node, &tree->tree); if (tree->last == node) tree->last = NULL; set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); spin_unlock_irq(&tree->lock); spin_lock(&root->ordered_extent_lock); list_del_init(&entry->root_extent_list); root->nr_ordered_extents--; trace_btrfs_ordered_extent_remove(inode, entry); /* * we have no more ordered extents for this inode and * no dirty pages. We can safely remove it from the * list of ordered extents */ if (RB_EMPTY_ROOT(&tree->tree) && !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { spin_lock(&root->fs_info->ordered_root_lock); list_del_init(&BTRFS_I(inode)->ordered_operations); spin_unlock(&root->fs_info->ordered_root_lock); } if (!root->nr_ordered_extents) { spin_lock(&root->fs_info->ordered_root_lock); BUG_ON(list_empty(&root->ordered_root)); list_del_init(&root->ordered_root); spin_unlock(&root->fs_info->ordered_root_lock); } spin_unlock(&root->ordered_extent_lock); wake_up(&entry->wait); } static void btrfs_run_ordered_extent_work(struct btrfs_work *work) { struct btrfs_ordered_extent *ordered; ordered = container_of(work, struct btrfs_ordered_extent, flush_work); btrfs_start_ordered_extent(ordered->inode, ordered, 1); complete(&ordered->completion); } /* * wait for all the ordered extents in a root. This is done when balancing * space between drives. 
*/
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	/* work from a private copy so new arrivals don't stall the walk */
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		/* move it back onto the root list before dropping the lock */
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		/* queue the flush asynchronously; we wait for it below */
		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		/* nr == -1 means "no limit, wait for everything" */
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	/* now wait for every queued flush work to complete */
	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return count;
}

/*
 * Wait for ordered extents on every root in the filesystem, up to @nr
 * extents total (nr == -1 waits for all of them).
 */
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		/* pin the root so it can't go away while the lock is dropped */
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this
is used during transaction commit to write all the inodes * added to the ordered operation list. These files must be fully on * disk before the transaction commits. * * we have two modes here, one is to just start the IO via filemap_flush * and the other is to wait for all the io. When we wait, we have an * extra check to make sure the ordered operation list really is empty * before we return */ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans, struct btrfs_root *root, int wait) { struct btrfs_inode *btrfs_inode; struct inode *inode; struct btrfs_transaction *cur_trans = trans->transaction; struct list_head splice; struct list_head works; struct btrfs_delalloc_work *work, *next; int ret = 0; INIT_LIST_HEAD(&splice); INIT_LIST_HEAD(&works); mutex_lock(&root->fs_info->ordered_extent_flush_mutex); spin_lock(&root->fs_info->ordered_root_lock); list_splice_init(&cur_trans->ordered_operations, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, ordered_operations); inode = &btrfs_inode->vfs_inode; list_del_init(&btrfs_inode->ordered_operations); /* * the inode may be getting freed (in sys_unlink path). 
*/ inode = igrab(inode); if (!inode) continue; if (!wait) list_add_tail(&BTRFS_I(inode)->ordered_operations, &cur_trans->ordered_operations); spin_unlock(&root->fs_info->ordered_root_lock); work = btrfs_alloc_delalloc_work(inode, wait, 1); if (!work) { spin_lock(&root->fs_info->ordered_root_lock); if (list_empty(&BTRFS_I(inode)->ordered_operations)) list_add_tail(&btrfs_inode->ordered_operations, &splice); list_splice_tail(&splice, &cur_trans->ordered_operations); spin_unlock(&root->fs_info->ordered_root_lock); ret = -ENOMEM; goto out; } list_add_tail(&work->list, &works); btrfs_queue_worker(&root->fs_info->flush_workers, &work->work); cond_resched(); spin_lock(&root->fs_info->ordered_root_lock); } spin_unlock(&root->fs_info->ordered_root_lock); out: list_for_each_entry_safe(work, next, &works, list) { list_del_init(&work->list); btrfs_wait_and_free_delalloc_work(work); } mutex_unlock(&root->fs_info->ordered_extent_flush_mutex); return ret; } /* * Used to start IO or wait for a given ordered extent to finish. * * If wait is one, this effectively waits on page writeback for all the pages * in the extent, and it waits on the io completion code to insert * metadata into the btree corresponding to the extent */ void btrfs_start_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry, int wait) { u64 start = entry->file_offset; u64 end = start + entry->len - 1; trace_btrfs_ordered_extent_start(inode, entry); /* * pages in the range can be dirty, clean or writeback. We * start IO on any dirty ones so the wait doesn't stall waiting * for the flusher thread to find them */ if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) filemap_fdatawrite_range(inode->i_mapping, start, end); if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); } } /* * Used to wait on ordered extents across a large range of bytes. 
*/ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { int ret = 0; u64 end; u64 orig_end; struct btrfs_ordered_extent *ordered; if (start + len < start) { orig_end = INT_LIMIT(loff_t); } else { orig_end = start + len - 1; if (orig_end > INT_LIMIT(loff_t)) orig_end = INT_LIMIT(loff_t); } /* start IO across the range first to instantiate any delalloc * extents */ ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end); if (ret) return ret; /* * So with compression we will find and lock a dirty page and clear the * first one as dirty, setup an async extent, and immediately return * with the entire range locked but with nobody actually marked with * writeback. So we can't just filemap_write_and_wait_range() and * expect it to work since it will just kick off a thread to do the * actual work. So we need to call filemap_fdatawrite_range _again_ * since it will wait on the page lock, which won't be unlocked until * after the pages have been marked as writeback and so we're good to go * from there. We have to do this otherwise we'll miss the ordered * extents and that results in badness. Please Josef, do not think you * know better and pull this out at some point in the future, it is * right and you are wrong. 
*/ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &BTRFS_I(inode)->runtime_flags)) { ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end); if (ret) return ret; } ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end); if (ret) return ret; end = orig_end; while (1) { ordered = btrfs_lookup_first_ordered_extent(inode, end); if (!ordered) break; if (ordered->file_offset > orig_end) { btrfs_put_ordered_extent(ordered); break; } if (ordered->file_offset + ordered->len <= start) { btrfs_put_ordered_extent(ordered); break; } btrfs_start_ordered_extent(inode, ordered, 1); end = ordered->file_offset; if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) ret = -EIO; btrfs_put_ordered_extent(ordered); if (ret || end == 0 || end == start) break; end--; } return ret; } /* * find an ordered extent corresponding to file_offset. return NULL if * nothing is found, otherwise take a reference on the extent and return it */ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, u64 file_offset) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; tree = &BTRFS_I(inode)->ordered_tree; spin_lock_irq(&tree->lock); node = tree_search(tree, file_offset); if (!node) goto out; entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (!offset_in_entry(entry, file_offset)) entry = NULL; if (entry) atomic_inc(&entry->refs); out: spin_unlock_irq(&tree->lock); return entry; } /* Since the DIO code tries to lock a wide area we need to look for any ordered * extents that exist in the range, rather than just the start of the range. 
*/
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		/* nothing at the start of the range; try the end of it */
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	/* walk forward until an extent overlaps or we pass the range */
	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		/* no overlap yet: clear entry in case rb_next() ends the walk */
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	/* caller must btrfs_put_ordered_extent() the returned entry */
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	/* tree_search() falls back to the first lesser offset on a miss */
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size. i_size is updated to cover any fully written part of the file.
*/ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, struct btrfs_ordered_extent *ordered) { struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; u64 disk_i_size; u64 new_i_size; u64 i_size = i_size_read(inode); struct rb_node *node; struct rb_node *prev = NULL; struct btrfs_ordered_extent *test; int ret = 1; spin_lock_irq(&tree->lock); if (ordered) { offset = entry_end(ordered); if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) offset = min(offset, ordered->file_offset + ordered->truncated_len); } else { offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); } disk_i_size = BTRFS_I(inode)->disk_i_size; /* truncate file */ if (disk_i_size > i_size) { BTRFS_I(inode)->disk_i_size = i_size; ret = 0; goto out; } /* * if the disk i_size is already at the inode->i_size, or * this ordered extent is inside the disk i_size, we're done */ if (disk_i_size == i_size) goto out; /* * We still need to update disk_i_size if outstanding_isize is greater * than disk_i_size. */ if (offset <= disk_i_size && (!ordered || ordered->outstanding_isize <= disk_i_size)) goto out; /* * walk backward from this ordered extent to disk_i_size. 
* if we find an ordered extent then we can't update disk i_size * yet */ if (ordered) { node = rb_prev(&ordered->rb_node); } else { prev = tree_search(tree, offset); /* * we insert file extents without involving ordered struct, * so there should be no ordered struct cover this offset */ if (prev) { test = rb_entry(prev, struct btrfs_ordered_extent, rb_node); BUG_ON(offset_in_entry(test, offset)); } node = prev; } for (; node; node = rb_prev(node)) { test = rb_entry(node, struct btrfs_ordered_extent, rb_node); /* We treat this entry as if it doesnt exist */ if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags)) continue; if (test->file_offset + test->len <= disk_i_size) break; if (test->file_offset >= i_size) break; if (entry_end(test) > disk_i_size) { /* * we don't update disk_i_size now, so record this * undealt i_size. Or we will not know the real * i_size. */ if (test->outstanding_isize < offset) test->outstanding_isize = offset; if (ordered && ordered->outstanding_isize > test->outstanding_isize) test->outstanding_isize = ordered->outstanding_isize; goto out; } } new_i_size = min_t(u64, offset, i_size); /* * Some ordered extents may completed before the current one, and * we hold the real i_size in ->outstanding_isize. */ if (ordered && ordered->outstanding_isize > new_i_size) new_i_size = min_t(u64, ordered->outstanding_isize, i_size); BTRFS_I(inode)->disk_i_size = new_i_size; ret = 0; out: /* * We need to do this because we can't remove ordered extents until * after the i_disk_size has been updated and then the inode has been * updated to reflect the change, so we need to tell anybody who finds * this ordered extent that we've already done all the real work, we * just haven't completed all the other work. */ if (ordered) set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags); spin_unlock_irq(&tree->lock); return ret; } /* * search the ordered extents for one corresponding to 'offset' and * try to find a checksum. 
This is used because we allow pages to * be reclaimed before their checksum is actually put into the btree */ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum, int len) { struct btrfs_ordered_sum *ordered_sum; struct btrfs_ordered_extent *ordered; struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; unsigned long num_sectors; unsigned long i; u32 sectorsize = BTRFS_I(inode)->root->sectorsize; int index = 0; ordered = btrfs_lookup_ordered_extent(inode, offset); if (!ordered) return 0; spin_lock_irq(&tree->lock); list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { if (disk_bytenr >= ordered_sum->bytenr && disk_bytenr < ordered_sum->bytenr + ordered_sum->len) { i = (disk_bytenr - ordered_sum->bytenr) >> inode->i_sb->s_blocksize_bits; num_sectors = ordered_sum->len >> inode->i_sb->s_blocksize_bits; num_sectors = min_t(int, len - index, num_sectors - i); memcpy(sum + index, ordered_sum->sums + i, num_sectors); index += (int)num_sectors; if (index == len) goto out; disk_bytenr += num_sectors * sectorsize; } } out: spin_unlock_irq(&tree->lock); btrfs_put_ordered_extent(ordered); return index; } /* * add a given inode to the list of inodes that must be fully on * disk before a transaction commit finishes. * * This basically gives us the ext3 style data=ordered mode, and it is mostly * used to make sure renamed files are fully on disk. * * It is a noop if the inode is already fully on disk. * * If trans is not null, we'll do a friendly check for a transaction that * is already flushing things and force the IO down ourselves. 
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	/* the inode was last dirtied in whichever of these is newer */
	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod <= root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	/* only queue the inode once per transaction */
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

/* Create the slab cache for ordered extents; called at module init. */
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

/* Tear down the ordered extent cache; called at module exit. */
void ordered_data_exit(void)
{
	/*
	 * NOTE(review): the NULL check is redundant on kernels where
	 * kmem_cache_destroy(NULL) is a no-op, but it is harmless here.
	 */
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}
gpl-2.0
indodev/kernel-samsung-3.0
drivers/media/video/msm/mt9p012_km.c
245
31507
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/kernel.h> #include <media/msm_camera.h> #include <mach/gpio.h> #include <mach/camera.h> #include "mt9p012_km.h" /*============================================================= SENSOR REGISTER DEFINES ==============================================================*/ #define MT9P012_KM_REG_MODEL_ID 0x0000 #define MT9P012_KM_MODEL_ID 0x2800 #define REG_GROUPED_PARAMETER_HOLD 0x0104 #define GROUPED_PARAMETER_HOLD 0x0100 #define GROUPED_PARAMETER_UPDATE 0x0000 #define REG_COARSE_INT_TIME 0x3012 #define REG_VT_PIX_CLK_DIV 0x0300 #define REG_VT_SYS_CLK_DIV 0x0302 #define REG_PRE_PLL_CLK_DIV 0x0304 #define REG_PLL_MULTIPLIER 0x0306 #define REG_OP_PIX_CLK_DIV 0x0308 #define REG_OP_SYS_CLK_DIV 0x030A #define REG_SCALE_M 0x0404 #define REG_FRAME_LENGTH_LINES 0x300A #define REG_LINE_LENGTH_PCK 0x300C #define REG_X_ADDR_START 0x3004 #define REG_Y_ADDR_START 0x3002 #define REG_X_ADDR_END 0x3008 #define REG_Y_ADDR_END 0x3006 #define REG_X_OUTPUT_SIZE 0x034C #define REG_Y_OUTPUT_SIZE 0x034E #define REG_FINE_INTEGRATION_TIME 0x3014 #define REG_ROW_SPEED 0x3016 #define 
MT9P012_KM_REG_RESET_REGISTER 0x301A #define MT9P012_KM_RESET_REGISTER_PWON 0x10CC #define MT9P012_KM_RESET_REGISTER_PWOFF 0x10C8 #define REG_READ_MODE 0x3040 #define REG_GLOBAL_GAIN 0x305E #define REG_TEST_PATTERN_MODE 0x3070 enum mt9p012_km_test_mode { TEST_OFF, TEST_1, TEST_2, TEST_3 }; enum mt9p012_km_resolution { QTR_SIZE, FULL_SIZE, INVALID_SIZE }; enum mt9p012_km_reg_update { /* Sensor egisters that need to be updated during initialization */ REG_INIT, /* Sensor egisters that needs periodic I2C writes */ UPDATE_PERIODIC, /* All the sensor Registers will be updated */ UPDATE_ALL, /* Not valid update */ UPDATE_INVALID }; enum mt9p012_km_setting { RES_PREVIEW, RES_CAPTURE }; uint8_t mode_mask = 0x04; /* actuator's Slave Address */ #define MT9P012_KM_AF_I2C_ADDR (0x18 >> 1) /* AF Total steps parameters */ #define MT9P012_KM_STEPS_NEAR_TO_CLOSEST_INF 30 #define MT9P012_KM_TOTAL_STEPS_NEAR_TO_FAR 30 /* Time in milisecs for waiting for the sensor to reset.*/ #define MT9P012_KM_RESET_DELAY_MSECS 66 /* for 20 fps preview */ #define MT9P012_KM_DEFAULT_CLOCK_RATE 24000000 struct mt9p012_km_work { struct work_struct work; }; static struct mt9p012_km_work *mt9p012_km_sensorw; static struct i2c_client *mt9p012_km_client; struct mt9p012_km_ctrl { const struct msm_camera_sensor_info *sensordata; int sensormode; uint32_t fps_divider; /* init to 1 * 0x00000400 */ uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ uint16_t curr_lens_pos; uint16_t init_curr_lens_pos; uint16_t my_reg_gain; uint32_t my_reg_line_count; enum mt9p012_km_resolution prev_res; enum mt9p012_km_resolution pict_res; enum mt9p012_km_resolution curr_res; enum mt9p012_km_test_mode set_test; }; static uint16_t update_type = UPDATE_PERIODIC; static struct mt9p012_km_ctrl *mt9p012_km_ctrl; static DECLARE_WAIT_QUEUE_HEAD(mt9p012_km_wait_queue); DEFINE_MUTEX(mt9p012_km_mut); /*=============================================================*/ static int mt9p012_km_i2c_rxdata(unsigned short saddr, unsigned char 
*rxdata, int length) { struct i2c_msg msgs[] = { { .addr = saddr << 1, .flags = 0, .len = 2, .buf = rxdata, }, { .addr = saddr << 1, .flags = I2C_M_RD, .len = length, .buf = rxdata, }, }; if (i2c_transfer(mt9p012_km_client->adapter, msgs, 2) < 0) { CDBG("mt9p012_km_i2c_rxdata failed!\n"); return -EIO; } return 0; } static int32_t mt9p012_km_i2c_read_w(unsigned short saddr, unsigned short raddr, unsigned short *rdata) { int32_t rc = 0; unsigned char buf[4]; if (!rdata) return -EIO; memset(buf, 0, sizeof(buf)); buf[0] = (raddr & 0xFF00) >> 8; buf[1] = (raddr & 0x00FF); rc = mt9p012_km_i2c_rxdata(saddr, buf, 2); if (rc < 0) return rc; *rdata = buf[0] << 8 | buf[1]; if (rc < 0) CDBG("mt9p012_km_i2c_read failed!\n"); return rc; } static int32_t mt9p012_km_i2c_txdata(unsigned short saddr, unsigned char *txdata, int length) { struct i2c_msg msg[] = { { .addr = saddr << 1, .flags = 0, .len = length, .buf = txdata, }, }; if (i2c_transfer(mt9p012_km_client->adapter, msg, 1) < 0) { CDBG("mt9p012_km_i2c_txdata failed\n"); return -EIO; } return 0; } static int32_t mt9p012_km_i2c_write_b(unsigned short saddr, unsigned short baddr, unsigned short bdata) { int32_t rc = -EIO; unsigned char buf[2]; memset(buf, 0, sizeof(buf)); buf[0] = baddr; buf[1] = bdata; rc = mt9p012_km_i2c_txdata(saddr, buf, 2); if (rc < 0) CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n", saddr, baddr, bdata); return rc; } static int32_t mt9p012_km_i2c_write_w(unsigned short saddr, unsigned short waddr, unsigned short wdata) { int32_t rc = -EIO; unsigned char buf[4]; memset(buf, 0, sizeof(buf)); buf[0] = (waddr & 0xFF00) >> 8; buf[1] = (waddr & 0x00FF); buf[2] = (wdata & 0xFF00) >> 8; buf[3] = (wdata & 0x00FF); rc = mt9p012_km_i2c_txdata(saddr, buf, 4); if (rc < 0) CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n", waddr, wdata); return rc; } static int32_t mt9p012_km_i2c_write_w_table(struct mt9p012_km_i2c_reg_conf const *reg_conf_tbl, int num) { int i; int32_t rc = -EIO; for (i = 0; i < 
num; i++) { rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, reg_conf_tbl->waddr, reg_conf_tbl->wdata); if (rc < 0) break; reg_conf_tbl++; } return rc; } static int32_t mt9p012_km_test(enum mt9p012_km_test_mode mo) { int32_t rc = 0; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return rc; if (mo == TEST_OFF) return 0; else { rc = mt9p012_km_i2c_write_w_table(mt9p012_km_regs.ttbl, mt9p012_km_regs.ttbl_size); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_TEST_PATTERN_MODE, (uint16_t) mo); if (rc < 0) return rc; } rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); if (rc < 0) return rc; return rc; } static int32_t mt9p012_km_lens_shading_enable(uint8_t is_enable) { int32_t rc = 0; CDBG("%s: entered. enable = %d\n", __func__, is_enable); rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x3780, ((uint16_t) is_enable) << 15); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); CDBG("%s: exiting. 
rc = %d\n", __func__, rc);
	return rc;
}

/* Load the sensor's lens-shading correction table over I2C. */
static int32_t mt9p012_km_set_lc(void)
{
	int32_t rc;

	rc = mt9p012_km_i2c_write_w_table(mt9p012_km_regs.lctbl,
					  mt9p012_km_regs.lctbl_size);

	return rc;
}

/*
 * Convert a preview fps value into the matching snapshot fps.
 * Scales by the preview/capture frame-geometry ratios and the PLL
 * multiplier ratio; intermediate math is fixed point Q10
 * (0x00000400 == 1.0).
 */
static void mt9p012_km_get_pict_fps(uint16_t fps, uint16_t *pfps)
{
	/* input fps is preview fps in Q8 format */
	uint32_t divider;	/*Q10 */
	uint32_t pclk_mult;	/*Q10 */
	uint32_t d1;
	uint32_t d2;

	/* preview/capture frame length (lines) ratio, Q10 */
	d1 = (uint32_t)(
		(mt9p012_km_regs.reg_pat[RES_PREVIEW].frame_length_lines *
		0x00000400) /
		mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines);

	/* preview/capture line length (pixel clocks) ratio, Q10 */
	d2 = (uint32_t)(
		(mt9p012_km_regs.reg_pat[RES_PREVIEW].line_length_pck *
		0x00000400) /
		mt9p012_km_regs.reg_pat[RES_CAPTURE].line_length_pck);

	divider = (uint32_t) (d1 * d2) / 0x00000400;

	/* capture/preview pixel clock ratio, Q10 */
	pclk_mult =
		(uint32_t) ((mt9p012_km_regs.reg_pat[RES_CAPTURE].
		pll_multiplier * 0x00000400) /
		(mt9p012_km_regs.reg_pat[RES_PREVIEW].pll_multiplier));

	/* Verify PCLK settings and frame sizes. */
	*pfps = (uint16_t)((((fps * pclk_mult) / 0x00000400) * divider)/
				0x00000400);
}

/* Frame length (lines) of the currently selected preview resolution. */
static uint16_t mt9p012_km_get_prev_lines_pf(void)
{
	if (mt9p012_km_ctrl->prev_res == QTR_SIZE)
		return mt9p012_km_regs.reg_pat[RES_PREVIEW].frame_length_lines;
	else
		return mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines;
}

/* Line length (pixel clocks) of the currently selected preview resolution. */
static uint16_t mt9p012_km_get_prev_pixels_pl(void)
{
	if (mt9p012_km_ctrl->prev_res == QTR_SIZE)
		return mt9p012_km_regs.reg_pat[RES_PREVIEW].line_length_pck;
	else
		return mt9p012_km_regs.reg_pat[RES_CAPTURE].line_length_pck;
}

/* Frame length (lines) used for snapshot capture. */
static uint16_t mt9p012_km_get_pict_lines_pf(void)
{
	return mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines;
}

/* Line length (pixel clocks) used for snapshot capture. */
static uint16_t mt9p012_km_get_pict_pixels_pl(void)
{
	return mt9p012_km_regs.reg_pat[RES_CAPTURE].line_length_pck;
}

/*
 * Maximum snapshot exposure in line counts, derived from the frame
 * length of the configured snapshot resolution.
 */
static uint32_t mt9p012_km_get_pict_max_exp_lc(void)
{
	uint16_t snapshot_lines_per_frame;

	if (mt9p012_km_ctrl->pict_res == QTR_SIZE)
		snapshot_lines_per_frame =
		mt9p012_km_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1;
	else
		snapshot_lines_per_frame =
		mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines -
1; return snapshot_lines_per_frame * 24; } static int32_t mt9p012_km_set_fps(struct fps_cfg *fps) { int32_t rc = 0; mt9p012_km_ctrl->fps_divider = fps->fps_div; mt9p012_km_ctrl->pict_fps_divider = fps->pict_fps_div; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return -EBUSY; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_FRAME_LENGTH_LINES, mt9p012_km_regs.reg_pat[mt9p012_km_ctrl->sensormode]. frame_length_lines * mt9p012_km_ctrl->fps_divider / 0x00000400); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); return rc; } static int32_t mt9p012_km_write_exp_gain(uint16_t gain, uint32_t line) { uint16_t max_legal_gain = 0x01FF; uint32_t line_length_ratio = 0x00000400; enum mt9p012_km_setting setting; int32_t rc = 0; CDBG("Line:%d mt9p012_km_write_exp_gain \n", __LINE__); if (mt9p012_km_ctrl->sensormode == SENSOR_PREVIEW_MODE) { mt9p012_km_ctrl->my_reg_gain = gain; mt9p012_km_ctrl->my_reg_line_count = (uint16_t) line; } if (gain > max_legal_gain) { CDBG("Max legal gain Line:%d \n", __LINE__); gain = max_legal_gain; } /* Verify no overflow */ if (mt9p012_km_ctrl->sensormode == SENSOR_PREVIEW_MODE) { line = (uint32_t) (line * mt9p012_km_ctrl->fps_divider / 0x00000400); setting = RES_PREVIEW; } else { line = (uint32_t) (line * mt9p012_km_ctrl->pict_fps_divider / 0x00000400); setting = RES_CAPTURE; } gain |= 0x0200; if ((mt9p012_km_regs.reg_pat[setting].frame_length_lines - 1) < line) { line_length_ratio = (uint32_t) (line * 0x00000400) / (mt9p012_km_regs.reg_pat[setting].frame_length_lines - 1); } else line_length_ratio = 0x00000400; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) { CDBG("mt9p012_km_i2c_write_w failed... 
Line:%d \n", __LINE__); return rc; } rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GLOBAL_GAIN, gain); if (rc < 0) { CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__); return rc; } rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_LINE_LENGTH_PCK, (uint16_t) (mt9p012_km_regs.reg_pat[setting]. line_length_pck * line_length_ratio / 0x00000400)); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_COARSE_INT_TIME, (uint16_t) ((line * 0x00000400)/ line_length_ratio)); if (rc < 0) { CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__); return rc; } rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); if (rc < 0) { CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__); return rc; } CDBG("mt9p012_km_write_exp_gain: gain = %d, line = %d\n", gain, line); return rc; } static int32_t mt9p012_km_set_pict_exp_gain(uint16_t gain, uint32_t line) { int32_t rc = 0; CDBG("Line:%d mt9p012_km_set_pict_exp_gain \n", __LINE__); rc = mt9p012_km_write_exp_gain(gain, line); if (rc < 0) { CDBG("Line:%d mt9p012_km_set_pict_exp_gain failed... \n", __LINE__); return rc; } rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, MT9P012_KM_REG_RESET_REGISTER, 0x10CC | 0x0002); if (rc < 0) { CDBG("mt9p012_km_i2c_write_w failed... 
Line:%d \n", __LINE__); return rc; } mdelay(5); /* camera_timed_wait(snapshot_wait*exposure_ratio); */ return rc; } static int32_t mt9p012_km_setting(enum mt9p012_km_reg_update rupdate, enum mt9p012_km_setting rt) { int32_t rc = 0; switch (rupdate) { case UPDATE_PERIODIC: if (rt == RES_PREVIEW || rt == RES_CAPTURE) { struct mt9p012_km_i2c_reg_conf ppc_tbl[] = { {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD}, {REG_ROW_SPEED, mt9p012_km_regs.reg_pat[rt].row_speed}, {REG_X_ADDR_START, mt9p012_km_regs.reg_pat[rt].x_addr_start}, {REG_X_ADDR_END, mt9p012_km_regs.reg_pat[rt].x_addr_end}, {REG_Y_ADDR_START, mt9p012_km_regs.reg_pat[rt].y_addr_start}, {REG_Y_ADDR_END, mt9p012_km_regs.reg_pat[rt].y_addr_end}, {REG_READ_MODE, mt9p012_km_regs.reg_pat[rt].read_mode}, {REG_SCALE_M, mt9p012_km_regs.reg_pat[rt].scale_m}, {REG_X_OUTPUT_SIZE, mt9p012_km_regs.reg_pat[rt].x_output_size}, {REG_Y_OUTPUT_SIZE, mt9p012_km_regs.reg_pat[rt].y_output_size}, {REG_LINE_LENGTH_PCK, mt9p012_km_regs.reg_pat[rt].line_length_pck}, {REG_FRAME_LENGTH_LINES, (mt9p012_km_regs.reg_pat[rt].frame_length_lines * mt9p012_km_ctrl->fps_divider / 0x00000400)}, {REG_COARSE_INT_TIME, mt9p012_km_regs.reg_pat[rt].coarse_int_time}, {REG_FINE_INTEGRATION_TIME, mt9p012_km_regs.reg_pat[rt].fine_int_time}, {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE}, }; if (update_type == REG_INIT) { update_type = rupdate; return rc; } rc = mt9p012_km_i2c_write_w_table(&ppc_tbl[0], ARRAY_SIZE(ppc_tbl)); if (rc < 0) return rc; rc = mt9p012_km_test(mt9p012_km_ctrl->set_test); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, MT9P012_KM_REG_RESET_REGISTER, 0x10cc | 0x0002); if (rc < 0) return rc; mdelay(15); /* 15? 
wait for sensor to transition */ return rc; } break; /* UPDATE_PERIODIC */ case REG_INIT: if (rt == RES_PREVIEW || rt == RES_CAPTURE) { struct mt9p012_km_i2c_reg_conf ipc_tbl1[] = { {MT9P012_KM_REG_RESET_REGISTER, MT9P012_KM_RESET_REGISTER_PWOFF}, {REG_VT_PIX_CLK_DIV, mt9p012_km_regs.reg_pat[rt].vt_pix_clk_div}, {REG_VT_SYS_CLK_DIV, mt9p012_km_regs.reg_pat[rt].vt_sys_clk_div}, {REG_PRE_PLL_CLK_DIV, mt9p012_km_regs.reg_pat[rt].pre_pll_clk_div}, {REG_PLL_MULTIPLIER, mt9p012_km_regs.reg_pat[rt].pll_multiplier}, {REG_OP_PIX_CLK_DIV, mt9p012_km_regs.reg_pat[rt].op_pix_clk_div}, {REG_OP_SYS_CLK_DIV, mt9p012_km_regs.reg_pat[rt].op_sys_clk_div}, {MT9P012_KM_REG_RESET_REGISTER, MT9P012_KM_RESET_REGISTER_PWON}, }; struct mt9p012_km_i2c_reg_conf ipc_tbl2[] = { {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD}, /* Optimized register settings for Rev3 Silicon */ {0x308A, 0x6424}, {0x3092, 0x0A52}, {0x3094, 0x4656}, {0x3096, 0x5652}, {0x0204, 0x0010}, {0x0206, 0x0010}, {0x0208, 0x0010}, {0x020A, 0x0010}, {0x020C, 0x0010}, {0x3088, 0x6FF6}, {0x3154, 0x0282}, {0x3156, 0x0381}, {0x3162, 0x04CE}, }; struct mt9p012_km_i2c_reg_conf ipc_tbl3[] = { /* Set preview or snapshot mode */ {REG_ROW_SPEED, mt9p012_km_regs.reg_pat[rt].row_speed}, {REG_X_ADDR_START, mt9p012_km_regs.reg_pat[rt].x_addr_start}, {REG_X_ADDR_END, mt9p012_km_regs.reg_pat[rt].x_addr_end}, {REG_Y_ADDR_START, mt9p012_km_regs.reg_pat[rt].y_addr_start}, {REG_Y_ADDR_END, mt9p012_km_regs.reg_pat[rt].y_addr_end}, {REG_READ_MODE, mt9p012_km_regs.reg_pat[rt].read_mode}, {REG_SCALE_M, mt9p012_km_regs.reg_pat[rt].scale_m}, {REG_X_OUTPUT_SIZE, mt9p012_km_regs.reg_pat[rt].x_output_size}, {REG_Y_OUTPUT_SIZE, mt9p012_km_regs.reg_pat[rt].y_output_size}, {REG_LINE_LENGTH_PCK, mt9p012_km_regs.reg_pat[rt].line_length_pck}, {REG_FRAME_LENGTH_LINES, mt9p012_km_regs.reg_pat[rt]. 
frame_length_lines}, {REG_COARSE_INT_TIME, mt9p012_km_regs.reg_pat[rt].coarse_int_time}, {REG_FINE_INTEGRATION_TIME, mt9p012_km_regs.reg_pat[rt].fine_int_time}, {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE}, }; /* reset fps_divider */ mt9p012_km_ctrl->fps_divider = 1 * 0x0400; rc = mt9p012_km_i2c_write_w_table(&ipc_tbl1[0], ARRAY_SIZE(ipc_tbl1)); if (rc < 0) return rc; mdelay(15); rc = mt9p012_km_i2c_write_w_table(&ipc_tbl2[0], ARRAY_SIZE(ipc_tbl2)); if (rc < 0) return rc; mdelay(5); rc = mt9p012_km_i2c_write_w_table(&ipc_tbl3[0], ARRAY_SIZE(ipc_tbl3)); if (rc < 0) return rc; /* load lens shading */ rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return rc; rc = mt9p012_km_set_lc(); if (rc < 0) return rc; rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); if (rc < 0) return rc; } update_type = rupdate; break; /* case REG_INIT: */ default: rc = -EINVAL; break; } /* switch (rupdate) */ return rc; } static int32_t mt9p012_km_video_config(int mode, int res) { int32_t rc; switch (res) { case QTR_SIZE: rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_PREVIEW); if (rc < 0) return rc; CDBG("mt9p012_km sensor configuration done!\n"); break; case FULL_SIZE: rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_CAPTURE); if (rc < 0) return rc; break; default: return 0; } /* switch */ mt9p012_km_ctrl->prev_res = res; mt9p012_km_ctrl->curr_res = res; mt9p012_km_ctrl->sensormode = mode; rc = mt9p012_km_write_exp_gain(mt9p012_km_ctrl->my_reg_gain, mt9p012_km_ctrl->my_reg_line_count); rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, MT9P012_KM_REG_RESET_REGISTER, 0x10cc | 0x0002); mdelay(15); return rc; } static int32_t mt9p012_km_snapshot_config(int mode) { int32_t rc = 0; rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_CAPTURE); if (rc < 0) return rc; mt9p012_km_ctrl->curr_res = mt9p012_km_ctrl->pict_res; mt9p012_km_ctrl->sensormode = mode; return rc; } 
/* Program capture-resolution settings for a raw (unprocessed) snapshot. */
static int32_t mt9p012_km_raw_snapshot_config(int mode)
{
	int32_t rc = 0;

	rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_CAPTURE);
	if (rc < 0)
		return rc;

	mt9p012_km_ctrl->curr_res = mt9p012_km_ctrl->pict_res;
	mt9p012_km_ctrl->sensormode = mode;

	return rc;
}

/* Put the sensor into its low-power state via the reset register. */
static int32_t mt9p012_km_power_down(void)
{
	int32_t rc = 0;

	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
				    MT9P012_KM_REG_RESET_REGISTER,
				    MT9P012_KM_RESET_REGISTER_PWOFF);
	/* settle time after power-off write */
	mdelay(5);

	return rc;
}

/*
 * Step the AF lens actuator @num_steps in @direction (MOVE_NEAR or
 * MOVE_FAR).  The lens position is clamped to the actuator's 10-bit
 * range [0, 1023].  Returns 0 on success, -EINVAL on bad arguments,
 * -EBUSY if the I2C write to the actuator fails.
 */
static int32_t mt9p012_km_move_focus(int direction, int32_t num_steps)
{
	int16_t step_direction;
	int16_t actual_step;
	int16_t next_position;
	uint8_t code_val_msb, code_val_lsb;

	if (num_steps > MT9P012_KM_TOTAL_STEPS_NEAR_TO_FAR)
		num_steps = MT9P012_KM_TOTAL_STEPS_NEAR_TO_FAR;
	else if (num_steps == 0) {
		CDBG("mt9p012_km_move_focus failed at line %d ...\n",
		     __LINE__);
		return -EINVAL;
	}

	if (direction == MOVE_NEAR)
		step_direction = 16;	/* 10bit */
	else if (direction == MOVE_FAR)
		step_direction = -16;	/* 10 bit */
	else {
		CDBG("mt9p012_km_move_focus failed at line %d ...\n",
		     __LINE__);
		return -EINVAL;
	}

	/* never start from below the recorded initial lens position */
	if (mt9p012_km_ctrl->curr_lens_pos <
	    mt9p012_km_ctrl->init_curr_lens_pos)
		mt9p012_km_ctrl->curr_lens_pos =
		    mt9p012_km_ctrl->init_curr_lens_pos;

	actual_step = (int16_t) (step_direction * (int16_t) num_steps);
	next_position = (int16_t) (mt9p012_km_ctrl->curr_lens_pos +
		actual_step);

	/* clamp to the actuator's 10-bit DAC range */
	if (next_position > 1023)
		next_position = 1023;
	else if (next_position < 0)
		next_position = 0;

	/* split the 10-bit code: high 6 bits in MSB, low 4 bits in LSB[7:4] */
	code_val_msb = next_position >> 4;
	code_val_lsb = (next_position & 0x000F) << 4;
	code_val_lsb |= mode_mask;

	/*
	 * Writing the digital code for current to the actuator.
	 * NOTE(review): MT9P012_KM_AF_I2C_ADDR is already (0x18 >> 1);
	 * shifting it right again here looks like a double shift given
	 * that the tx helper shifts the address left once -- confirm
	 * against the actuator datasheet before touching.
	 */
	if (mt9p012_km_i2c_write_b(MT9P012_KM_AF_I2C_ADDR >> 1,
				   code_val_msb, code_val_lsb) < 0) {
		CDBG("mt9p012_km_move_focus failed at line %d ...\n",
		     __LINE__);
		return -EBUSY;
	}

	/* Storing the current lens Position */
	mt9p012_km_ctrl->curr_lens_pos = next_position;

	return 0;
}

/* Drive the AF actuator to its default position. */
static int32_t mt9p012_km_set_default_focus(void)
{
	int32_t rc = 0;
	uint8_t code_val_msb, code_val_lsb;

	code_val_msb =
0x00; code_val_lsb = 0x04; /* Write the digital code for current to the actuator */ rc = mt9p012_km_i2c_write_b(MT9P012_KM_AF_I2C_ADDR >> 1, code_val_msb, code_val_lsb); mt9p012_km_ctrl->curr_lens_pos = 0; mt9p012_km_ctrl->init_curr_lens_pos = 0; return rc; } static int mt9p012_km_probe_init_done(const struct msm_camera_sensor_info *data) { gpio_direction_output(data->sensor_reset, 0); gpio_free(data->sensor_reset); return 0; } static int mt9p012_km_probe_init_sensor(const struct msm_camera_sensor_info *data) { int32_t rc; uint16_t chipid; rc = gpio_request(data->sensor_reset, "mt9p012_km"); if (!rc) gpio_direction_output(data->sensor_reset, 1); else goto init_probe_done; msleep(20); /* RESET the sensor image part via I2C command */ CDBG("mt9p012_km_sensor_init(): reseting sensor.\n"); rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, MT9P012_KM_REG_RESET_REGISTER, 0x10CC | 0x0001); if (rc < 0) { CDBG("sensor reset failed. rc = %d\n", rc); goto init_probe_fail; } msleep(MT9P012_KM_RESET_DELAY_MSECS); /* 3. Read sensor Model ID: */ rc = mt9p012_km_i2c_read_w(mt9p012_km_client->addr, MT9P012_KM_REG_MODEL_ID, &chipid); if (rc < 0) goto init_probe_fail; /* 4. Compare sensor ID to MT9T012VC ID: */ if (chipid != MT9P012_KM_MODEL_ID) { CDBG("mt9p012_km wrong model_id = 0x%x\n", chipid); rc = -ENODEV; goto init_probe_fail; } rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x306E, 0x9080); if (rc < 0) { CDBG("REV_7 write failed. rc = %d\n", rc); goto init_probe_fail; } /* RESET_REGISTER, enable parallel interface and disable serialiser */ CDBG("mt9p012_km_sensor_init(): enabling parallel interface.\n"); rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x301A, 0x10CC); if (rc < 0) { CDBG("enable parallel interface failed. rc = %d\n", rc); goto init_probe_fail; } /* To disable the 2 extra lines */ rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x3064, 0x0805); if (rc < 0) { CDBG("disable the 2 extra lines failed. 
rc = %d\n", rc); goto init_probe_fail; } goto init_probe_done; init_probe_fail: mt9p012_km_probe_init_done(data); init_probe_done: return rc; } static int mt9p012_km_sensor_open_init(const struct msm_camera_sensor_info *data) { int32_t rc; mt9p012_km_ctrl = kzalloc(sizeof(struct mt9p012_km_ctrl), GFP_KERNEL); if (!mt9p012_km_ctrl) { CDBG("mt9p012_km_init failed!\n"); rc = -ENOMEM; goto init_done; } mt9p012_km_ctrl->fps_divider = 1 * 0x00000400; mt9p012_km_ctrl->pict_fps_divider = 1 * 0x00000400; mt9p012_km_ctrl->set_test = TEST_OFF; mt9p012_km_ctrl->prev_res = QTR_SIZE; mt9p012_km_ctrl->pict_res = FULL_SIZE; if (data) mt9p012_km_ctrl->sensordata = data; msm_camio_camif_pad_reg_reset(); mdelay(20); rc = mt9p012_km_probe_init_sensor(data); if (rc < 0) goto init_fail1; if (mt9p012_km_ctrl->prev_res == QTR_SIZE) rc = mt9p012_km_setting(REG_INIT, RES_PREVIEW); else rc = mt9p012_km_setting(REG_INIT, RES_CAPTURE); if (rc < 0) { CDBG("mt9p012_km_setting failed. rc = %d\n", rc); goto init_fail1; } /* sensor : output enable */ CDBG("mt9p012_km_sensor_open_init(): enabling output.\n"); rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, MT9P012_KM_REG_RESET_REGISTER, MT9P012_KM_RESET_REGISTER_PWON); if (rc < 0) { CDBG("sensor output enable failed. 
rc = %d\n", rc); goto init_fail1; } if (rc >= 0) goto init_done; init_fail1: mt9p012_km_probe_init_done(data); kfree(mt9p012_km_ctrl); init_done: return rc; } static int mt9p012_km_init_client(struct i2c_client *client) { /* Initialize the MSM_CAMI2C Chip */ init_waitqueue_head(&mt9p012_km_wait_queue); return 0; } static int32_t mt9p012_km_set_sensor_mode(int mode, int res) { int32_t rc = 0; switch (mode) { case SENSOR_PREVIEW_MODE: rc = mt9p012_km_video_config(mode, res); break; case SENSOR_SNAPSHOT_MODE: rc = mt9p012_km_snapshot_config(mode); break; case SENSOR_RAW_SNAPSHOT_MODE: rc = mt9p012_km_raw_snapshot_config(mode); break; default: rc = -EINVAL; break; } return rc; } int mt9p012_km_sensor_config(void __user *argp) { struct sensor_cfg_data cdata; int rc = 0; if (copy_from_user(&cdata, (void *)argp, sizeof(struct sensor_cfg_data))) return -EFAULT; mutex_lock(&mt9p012_km_mut); CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype); switch (cdata.cfgtype) { case CFG_GET_PICT_FPS: mt9p012_km_get_pict_fps(cdata.cfg.gfps.prevfps, &(cdata.cfg.gfps.pictfps)); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PREV_L_PF: cdata.cfg.prevl_pf = mt9p012_km_get_prev_lines_pf(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PREV_P_PL: cdata.cfg.prevp_pl = mt9p012_km_get_prev_pixels_pl(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_L_PF: cdata.cfg.pictl_pf = mt9p012_km_get_pict_lines_pf(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_P_PL: cdata.cfg.pictp_pl = mt9p012_km_get_pict_pixels_pl(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_MAX_EXP_LC: cdata.cfg.pict_max_exp_lc = mt9p012_km_get_pict_max_exp_lc(); if (copy_to_user((void *)argp, &cdata, sizeof(struct 
sensor_cfg_data))) rc = -EFAULT; break; case CFG_SET_FPS: case CFG_SET_PICT_FPS: rc = mt9p012_km_set_fps(&(cdata.cfg.fps)); break; case CFG_SET_EXP_GAIN: rc = mt9p012_km_write_exp_gain(cdata.cfg.exp_gain.gain, cdata.cfg.exp_gain.line); break; case CFG_SET_PICT_EXP_GAIN: CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__); rc = mt9p012_km_set_pict_exp_gain(cdata.cfg.exp_gain.gain, cdata.cfg.exp_gain.line); break; case CFG_SET_MODE: rc = mt9p012_km_set_sensor_mode(cdata.mode, cdata.rs); break; case CFG_PWR_DOWN: rc = mt9p012_km_power_down(); break; case CFG_MOVE_FOCUS: CDBG("mt9p012_km_ioctl: CFG_MOVE_FOCUS: cdata.cfg.focus.dir=%d \ cdata.cfg.focus.steps=%d\n", cdata.cfg.focus.dir, cdata.cfg.focus.steps); rc = mt9p012_km_move_focus(cdata.cfg.focus.dir, cdata.cfg.focus.steps); break; case CFG_SET_DEFAULT_FOCUS: rc = mt9p012_km_set_default_focus(); break; case CFG_SET_LENS_SHADING: CDBG("%s: CFG_SET_LENS_SHADING\n", __func__); rc = mt9p012_km_lens_shading_enable(cdata.cfg.lens_shading); break; case CFG_GET_AF_MAX_STEPS: cdata.max_steps = MT9P012_KM_STEPS_NEAR_TO_CLOSEST_INF; if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_SET_EFFECT: default: rc = -EINVAL; break; } mutex_unlock(&mt9p012_km_mut); return rc; } int mt9p012_km_sensor_release(void) { int rc = -EBADF; mutex_lock(&mt9p012_km_mut); mt9p012_km_power_down(); gpio_direction_output(mt9p012_km_ctrl->sensordata->sensor_reset, 0); gpio_free(mt9p012_km_ctrl->sensordata->sensor_reset); gpio_direction_output(mt9p012_km_ctrl->sensordata->vcm_pwd, 0); gpio_free(mt9p012_km_ctrl->sensordata->vcm_pwd); kfree(mt9p012_km_ctrl); mt9p012_km_ctrl = NULL; CDBG("mt9p012_km_release completed\n"); mutex_unlock(&mt9p012_km_mut); return rc; } static int mt9p012_km_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; CDBG("mt9p012_km_probe called!\n"); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { CDBG("i2c_check_functionality failed\n"); 
goto probe_failure; } mt9p012_km_sensorw = kzalloc(sizeof(struct mt9p012_km_work), GFP_KERNEL); if (!mt9p012_km_sensorw) { CDBG("kzalloc failed.\n"); rc = -ENOMEM; goto probe_failure; } i2c_set_clientdata(client, mt9p012_km_sensorw); mt9p012_km_init_client(client); mt9p012_km_client = client; mdelay(50); CDBG("mt9p012_km_probe successed! rc = %d\n", rc); return 0; probe_failure: CDBG("mt9p012_km_probe failed! rc = %d\n", rc); return rc; } static const struct i2c_device_id mt9p012_km_i2c_id[] = { {"mt9p012_km", 0}, {} }; static struct i2c_driver mt9p012_km_i2c_driver = { .id_table = mt9p012_km_i2c_id, .probe = mt9p012_km_i2c_probe, .remove = __exit_p(mt9p012_km_i2c_remove), .driver = { .name = "mt9p012_km", }, }; static int mt9p012_km_sensor_probe(const struct msm_camera_sensor_info *info, struct msm_sensor_ctrl *s) { int rc = i2c_add_driver(&mt9p012_km_i2c_driver); if (rc < 0 || mt9p012_km_client == NULL) { rc = -ENOTSUPP; goto probe_done; } msm_camio_clk_rate_set(MT9P012_KM_DEFAULT_CLOCK_RATE); mdelay(20); rc = mt9p012_km_probe_init_sensor(info); if (rc < 0) goto probe_done; s->s_init = mt9p012_km_sensor_open_init; s->s_release = mt9p012_km_sensor_release; s->s_config = mt9p012_km_sensor_config; s->s_mount_angle = 0; mt9p012_km_probe_init_done(info); probe_done: CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__); return rc; } static int __mt9p012_km_probe(struct platform_device *pdev) { return msm_camera_drv_start(pdev, mt9p012_km_sensor_probe); } static struct platform_driver msm_camera_driver = { .probe = __mt9p012_km_probe, .driver = { .name = "msm_camera_mt9p012_km", .owner = THIS_MODULE, }, }; static int __init mt9p012_km_init(void) { return platform_driver_register(&msm_camera_driver); } module_init(mt9p012_km_init);
gpl-2.0
val2k/linux
drivers/firewire/nosy.c
245
17997
/* * nosy - Snoop mode driver for TI PCILynx 1394 controllers * Copyright (C) 2002-2007 Kristian Høgsberg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/device.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/sched.h> /* required for linux/wait.h */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/time64.h> #include <linux/timex.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/atomic.h> #include <asm/byteorder.h> #include "nosy.h" #include "nosy-user.h" #define TCODE_PHY_PACKET 0x10 #define PCI_DEVICE_ID_TI_PCILYNX 0x8000 static char driver_name[] = KBUILD_MODNAME; /* this is the physical layout of a PCL, its size is 128 bytes */ struct pcl { __le32 next; __le32 async_error_next; u32 user_data; __le32 pcl_status; __le32 remaining_transfer_count; __le32 next_data_buffer; struct { __le32 control; __le32 pointer; } buffer[13]; }; struct packet { unsigned int length; char data[0]; }; struct packet_buffer { char *data; size_t capacity; long total_packet_count, 
lost_packet_count; atomic_t size; struct packet *head, *tail; wait_queue_head_t wait; }; struct pcilynx { struct pci_dev *pci_device; __iomem char *registers; struct pcl *rcv_start_pcl, *rcv_pcl; __le32 *rcv_buffer; dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus; spinlock_t client_list_lock; struct list_head client_list; struct miscdevice misc; struct list_head link; struct kref kref; }; static inline struct pcilynx * lynx_get(struct pcilynx *lynx) { kref_get(&lynx->kref); return lynx; } static void lynx_release(struct kref *kref) { kfree(container_of(kref, struct pcilynx, kref)); } static inline void lynx_put(struct pcilynx *lynx) { kref_put(&lynx->kref, lynx_release); } struct client { struct pcilynx *lynx; u32 tcode_mask; struct packet_buffer buffer; struct list_head link; }; static DEFINE_MUTEX(card_mutex); static LIST_HEAD(card_list); static int packet_buffer_init(struct packet_buffer *buffer, size_t capacity) { buffer->data = kmalloc(capacity, GFP_KERNEL); if (buffer->data == NULL) return -ENOMEM; buffer->head = (struct packet *) buffer->data; buffer->tail = (struct packet *) buffer->data; buffer->capacity = capacity; buffer->lost_packet_count = 0; atomic_set(&buffer->size, 0); init_waitqueue_head(&buffer->wait); return 0; } static void packet_buffer_destroy(struct packet_buffer *buffer) { kfree(buffer->data); } static int packet_buffer_get(struct client *client, char __user *data, size_t user_length) { struct packet_buffer *buffer = &client->buffer; size_t length; char *end; if (wait_event_interruptible(buffer->wait, atomic_read(&buffer->size) > 0) || list_empty(&client->lynx->link)) return -ERESTARTSYS; if (atomic_read(&buffer->size) == 0) return -ENODEV; /* FIXME: Check length <= user_length. 
*/ end = buffer->data + buffer->capacity; length = buffer->head->length; if (&buffer->head->data[length] < end) { if (copy_to_user(data, buffer->head->data, length)) return -EFAULT; buffer->head = (struct packet *) &buffer->head->data[length]; } else { size_t split = end - buffer->head->data; if (copy_to_user(data, buffer->head->data, split)) return -EFAULT; if (copy_to_user(data + split, buffer->data, length - split)) return -EFAULT; buffer->head = (struct packet *) &buffer->data[length - split]; } /* * Decrease buffer->size as the last thing, since this is what * keeps the interrupt from overwriting the packet we are * retrieving from the buffer. */ atomic_sub(sizeof(struct packet) + length, &buffer->size); return length; } static void packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length) { char *end; buffer->total_packet_count++; if (buffer->capacity < atomic_read(&buffer->size) + sizeof(struct packet) + length) { buffer->lost_packet_count++; return; } end = buffer->data + buffer->capacity; buffer->tail->length = length; if (&buffer->tail->data[length] < end) { memcpy(buffer->tail->data, data, length); buffer->tail = (struct packet *) &buffer->tail->data[length]; } else { size_t split = end - buffer->tail->data; memcpy(buffer->tail->data, data, split); memcpy(buffer->data, data + split, length - split); buffer->tail = (struct packet *) &buffer->data[length - split]; } /* Finally, adjust buffer size and wake up userspace reader. 
*/ atomic_add(sizeof(struct packet) + length, &buffer->size); wake_up_interruptible(&buffer->wait); } static inline void reg_write(struct pcilynx *lynx, int offset, u32 data) { writel(data, lynx->registers + offset); } static inline u32 reg_read(struct pcilynx *lynx, int offset) { return readl(lynx->registers + offset); } static inline void reg_set_bits(struct pcilynx *lynx, int offset, u32 mask) { reg_write(lynx, offset, (reg_read(lynx, offset) | mask)); } /* * Maybe the pcl programs could be set up to just append data instead * of using a whole packet. */ static inline void run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, int dmachan) { reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus); reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20, DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK); } static int set_phy_reg(struct pcilynx *lynx, int addr, int val) { if (addr > 15) { dev_err(&lynx->pci_device->dev, "PHY register address %d out of range\n", addr); return -1; } if (val > 0xff) { dev_err(&lynx->pci_device->dev, "PHY register value %d out of range\n", val); return -1; } reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val)); return 0; } static int nosy_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct client *client; struct pcilynx *tmp, *lynx = NULL; mutex_lock(&card_mutex); list_for_each_entry(tmp, &card_list, link) if (tmp->misc.minor == minor) { lynx = lynx_get(tmp); break; } mutex_unlock(&card_mutex); if (lynx == NULL) return -ENODEV; client = kmalloc(sizeof *client, GFP_KERNEL); if (client == NULL) goto fail; client->tcode_mask = ~0; client->lynx = lynx; INIT_LIST_HEAD(&client->link); if (packet_buffer_init(&client->buffer, 128 * 1024) < 0) goto fail; file->private_data = client; return nonseekable_open(inode, file); fail: kfree(client); lynx_put(lynx); return -ENOMEM; } static int nosy_release(struct inode *inode, struct file *file) { struct client *client = file->private_data; struct pcilynx 
*lynx = client->lynx; spin_lock_irq(&lynx->client_list_lock); list_del_init(&client->link); spin_unlock_irq(&lynx->client_list_lock); packet_buffer_destroy(&client->buffer); kfree(client); lynx_put(lynx); return 0; } static unsigned int nosy_poll(struct file *file, poll_table *pt) { struct client *client = file->private_data; unsigned int ret = 0; poll_wait(file, &client->buffer.wait, pt); if (atomic_read(&client->buffer.size) > 0) ret = POLLIN | POLLRDNORM; if (list_empty(&client->lynx->link)) ret |= POLLHUP; return ret; } static ssize_t nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct client *client = file->private_data; return packet_buffer_get(client, buffer, count); } static long nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; switch (cmd) { case NOSY_IOC_GET_STATS: spin_lock_irq(client_list_lock); stats.total_packet_count = client->buffer.total_packet_count; stats.lost_packet_count = client->buffer.lost_packet_count; spin_unlock_irq(client_list_lock); if (copy_to_user((void __user *) arg, &stats, sizeof stats)) return -EFAULT; else return 0; case NOSY_IOC_START: spin_lock_irq(client_list_lock); list_add_tail(&client->link, &client->lynx->client_list); spin_unlock_irq(client_list_lock); return 0; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); list_del_init(&client->link); spin_unlock_irq(client_list_lock); return 0; case NOSY_IOC_FILTER: spin_lock_irq(client_list_lock); client->tcode_mask = arg; spin_unlock_irq(client_list_lock); return 0; default: return -EINVAL; /* Flush buffer, configure filter. 
*/ } } static const struct file_operations nosy_ops = { .owner = THIS_MODULE, .read = nosy_read, .unlocked_ioctl = nosy_ioctl, .poll = nosy_poll, .open = nosy_open, .release = nosy_release, }; #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ static void packet_irq_handler(struct pcilynx *lynx) { struct client *client; u32 tcode_mask, tcode, timestamp; size_t length; struct timespec64 ts64; /* FIXME: Also report rcv_speed. */ length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff; tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf; ktime_get_real_ts64(&ts64); timestamp = ts64.tv_nsec / NSEC_PER_USEC; lynx->rcv_buffer[0] = (__force __le32)timestamp; if (length == PHY_PACKET_SIZE) tcode_mask = 1 << TCODE_PHY_PACKET; else tcode_mask = 1 << tcode; spin_lock(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) if (client->tcode_mask & tcode_mask) packet_buffer_put(&client->buffer, lynx->rcv_buffer, length + 4); spin_unlock(&lynx->client_list_lock); } static void bus_reset_irq_handler(struct pcilynx *lynx) { struct client *client; struct timespec64 ts64; u32 timestamp; ktime_get_real_ts64(&ts64); timestamp = ts64.tv_nsec / NSEC_PER_USEC; spin_lock(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) packet_buffer_put(&client->buffer, &timestamp, 4); spin_unlock(&lynx->client_list_lock); } static irqreturn_t irq_handler(int irq, void *device) { struct pcilynx *lynx = device; u32 pci_int_status; pci_int_status = reg_read(lynx, PCI_INT_STATUS); if (pci_int_status == ~0) /* Card was ejected. */ return IRQ_NONE; if ((pci_int_status & PCI_INT_INT_PEND) == 0) /* Not our interrupt, bail out quickly. 
*/ return IRQ_NONE; if ((pci_int_status & PCI_INT_P1394_INT) != 0) { u32 link_int_status; link_int_status = reg_read(lynx, LINK_INT_STATUS); reg_write(lynx, LINK_INT_STATUS, link_int_status); if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0) bus_reset_irq_handler(lynx); } /* Clear the PCI_INT_STATUS register only after clearing the * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will * be set again immediately. */ reg_write(lynx, PCI_INT_STATUS, pci_int_status); if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) { packet_irq_handler(lynx); run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); } return IRQ_HANDLED; } static void remove_card(struct pci_dev *dev) { struct pcilynx *lynx = pci_get_drvdata(dev); struct client *client; mutex_lock(&card_mutex); list_del_init(&lynx->link); misc_deregister(&lynx->misc); mutex_unlock(&card_mutex); reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); spin_lock_irq(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) wake_up_interruptible(&client->buffer.wait); spin_unlock_irq(&lynx->client_list_lock); pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); pci_disable_device(dev); lynx_put(lynx); } #define RCV_BUFFER_SIZE (16 * 1024) static int add_card(struct pci_dev *dev, const struct pci_device_id *unused) { struct pcilynx *lynx; u32 p, end; int ret, i; if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { dev_err(&dev->dev, "DMA address limits not supported for PCILynx hardware\n"); return -ENXIO; } if (pci_enable_device(dev)) { dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); return -ENXIO; } pci_set_master(dev); lynx = kzalloc(sizeof *lynx, GFP_KERNEL); if (lynx == NULL) { dev_err(&dev->dev, "Failed to allocate control 
structure\n"); ret = -ENOMEM; goto fail_disable; } lynx->pci_device = dev; pci_set_drvdata(dev, lynx); spin_lock_init(&lynx->client_list_lock); INIT_LIST_HEAD(&lynx->client_list); kref_init(&lynx->kref); lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); if (lynx->registers == NULL) { dev_err(&dev->dev, "Failed to map registers\n"); ret = -ENOMEM; goto fail_deallocate_lynx; } lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_start_pcl_bus); lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_pcl_bus); lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); if (lynx->rcv_start_pcl == NULL || lynx->rcv_pcl == NULL || lynx->rcv_buffer == NULL) { dev_err(&dev->dev, "Failed to allocate receive buffer\n"); ret = -ENOMEM; goto fail_deallocate_buffers; } lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); lynx->rcv_pcl->buffer[0].control = cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); lynx->rcv_pcl->buffer[0].pointer = cpu_to_le32(lynx->rcv_buffer_bus + 4); p = lynx->rcv_buffer_bus + 2048; end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; for (i = 1; p < end; i++, p += 2048) { lynx->rcv_pcl->buffer[i].control = cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p); } lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); /* Fix buggy cards with autoboot pin not tied low: */ reg_write(lynx, DMA0_CHAN_CTRL, 0); reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); #if 0 /* now, looking for PHY register set */ if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { lynx->phyic.reg_1394a = 1; PRINT(KERN_INFO, lynx->id, "found 1394a conform PHY (using extended register set)"); lynx->phyic.vendor = 
get_phy_vendorid(lynx); lynx->phyic.product = get_phy_productid(lynx); } else { lynx->phyic.reg_1394a = 0; PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); } #endif /* Setup the general receive FIFO max size. */ reg_write(lynx, FIFO_SIZES, 255); reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | LINK_INT_AT_STUCK | LINK_INT_SNTRJ | LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); /* Disable the L flag in self ID packets. */ set_phy_reg(lynx, 4, 0); /* Put this baby into snoop mode */ reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); if (request_irq(dev->irq, irq_handler, IRQF_SHARED, driver_name, lynx)) { dev_err(&dev->dev, "Failed to allocate shared interrupt %d\n", dev->irq); ret = -EIO; goto fail_deallocate_buffers; } lynx->misc.parent = &dev->dev; lynx->misc.minor = MISC_DYNAMIC_MINOR; lynx->misc.name = "nosy"; lynx->misc.fops = &nosy_ops; mutex_lock(&card_mutex); ret = misc_register(&lynx->misc); if (ret) { dev_err(&dev->dev, "Failed to register misc char device\n"); mutex_unlock(&card_mutex); goto fail_free_irq; } list_add_tail(&lynx->link, &card_list); mutex_unlock(&card_mutex); dev_info(&dev->dev, "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); return 0; fail_free_irq: reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); fail_deallocate_buffers: if (lynx->rcv_start_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); if (lynx->rcv_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); if (lynx->rcv_buffer) pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); fail_deallocate_lynx: kfree(lynx); fail_disable: pci_disable_device(dev); return ret; 
} static struct pci_device_id pci_table[] = { { .vendor = PCI_VENDOR_ID_TI, .device = PCI_DEVICE_ID_TI_PCILYNX, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, pci_table); static struct pci_driver lynx_pci_driver = { .name = driver_name, .id_table = pci_table, .probe = add_card, .remove = remove_card, }; module_pci_driver(lynx_pci_driver); MODULE_AUTHOR("Kristian Hoegsberg"); MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers"); MODULE_LICENSE("GPL");
gpl-2.0
ultrasystem/uavlinux-x3
net/sctp/transport.c
245
20142
/* SCTP kernel implementation * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 International Business Machines Corp. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * This module provides the abstraction for an SCTP tranport representing * a remote transport address. For local transport addresses, we just use * union sctp_addr. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Hui Huang <hui.huang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/types.h> #include <linux/random.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* 1st Level Abstractions. */ /* Initialize a new transport from provided memory. */ static struct sctp_transport *sctp_transport_init(struct net *net, struct sctp_transport *peer, const union sctp_addr *addr, gfp_t gfp) { /* Copy in the address. 
*/ peer->ipaddr = *addr; peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); memset(&peer->saddr, 0, sizeof(union sctp_addr)); peer->sack_generation = 0; /* From 6.3.1 RTO Calculation: * * C1) Until an RTT measurement has been made for a packet sent to the * given destination transport address, set RTO to the protocol * parameter 'RTO.Initial'. */ peer->rto = msecs_to_jiffies(net->sctp.rto_initial); peer->last_time_heard = jiffies; peer->last_time_ecne_reduced = jiffies; peer->param_flags = SPP_HB_DISABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; /* Initialize the default path max_retrans. */ peer->pathmaxrxt = net->sctp.max_retrans_path; peer->pf_retrans = net->sctp.pf_retrans; INIT_LIST_HEAD(&peer->transmitted); INIT_LIST_HEAD(&peer->send_ready); INIT_LIST_HEAD(&peer->transports); setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, (unsigned long)peer); setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, (unsigned long)peer); setup_timer(&peer->proto_unreach_timer, sctp_generate_proto_unreach_event, (unsigned long)peer); /* Initialize the 64-bit random nonce sent with heartbeat. */ get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); atomic_set(&peer->refcnt, 1); return peer; } /* Allocate and initialize a new transport. */ struct sctp_transport *sctp_transport_new(struct net *net, const union sctp_addr *addr, gfp_t gfp) { struct sctp_transport *transport; transport = kzalloc(sizeof(*transport), gfp); if (!transport) goto fail; if (!sctp_transport_init(net, transport, addr, gfp)) goto fail_init; SCTP_DBG_OBJCNT_INC(transport); return transport; fail_init: kfree(transport); fail: return NULL; } /* This transport is no longer needed. Free up if possible, or * delay until it last reference count. */ void sctp_transport_free(struct sctp_transport *transport) { transport->dead = 1; /* Try to delete the heartbeat timer. */ if (del_timer(&transport->hb_timer)) sctp_transport_put(transport); /* Delete the T3_rtx timer if it's active. 
* There is no point in not doing this now and letting * structure hang around in memory since we know * the tranport is going away. */ if (del_timer(&transport->T3_rtx_timer)) sctp_transport_put(transport); /* Delete the ICMP proto unreachable timer if it's active. */ if (del_timer(&transport->proto_unreach_timer)) sctp_association_put(transport->asoc); sctp_transport_put(transport); } static void sctp_transport_destroy_rcu(struct rcu_head *head) { struct sctp_transport *transport; transport = container_of(head, struct sctp_transport, rcu); dst_release(transport->dst); kfree(transport); SCTP_DBG_OBJCNT_DEC(transport); } /* Destroy the transport data structure. * Assumes there are no more users of this structure. */ static void sctp_transport_destroy(struct sctp_transport *transport) { if (unlikely(!transport->dead)) { WARN(1, "Attempt to destroy undead transport %p!\n", transport); return; } sctp_packet_free(&transport->packet); if (transport->asoc) sctp_association_put(transport->asoc); call_rcu(&transport->rcu, sctp_transport_destroy_rcu); } /* Start T3_rtx timer if it is not already running and update the heartbeat * timer. This routine is called every time a DATA chunk is sent. */ void sctp_transport_reset_timers(struct sctp_transport *transport) { /* RFC 2960 6.3.2 Retransmission Timer Rules * * R1) Every time a DATA chunk is sent to any address(including a * retransmission), if the T3-rtx timer of that address is not running * start it running so that it will expire after the RTO of that * address. */ if (!timer_pending(&transport->T3_rtx_timer)) if (!mod_timer(&transport->T3_rtx_timer, jiffies + transport->rto)) sctp_transport_hold(transport); /* When a data chunk is sent, reset the heartbeat interval. */ if (!mod_timer(&transport->hb_timer, sctp_transport_timeout(transport))) sctp_transport_hold(transport); } /* This transport has been assigned to an association. * Initialize fields from the association or from the sock itself. 
* Register the reference count in the association. */ void sctp_transport_set_owner(struct sctp_transport *transport, struct sctp_association *asoc) { transport->asoc = asoc; sctp_association_hold(asoc); } /* Initialize the pmtu of a transport. */ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) { /* If we don't have a fresh route, look one up */ if (!transport->dst || transport->dst->obsolete) { dst_release(transport->dst); transport->af_specific->get_dst(transport, &transport->saddr, &transport->fl, sk); } if (transport->dst) { transport->pathmtu = dst_mtu(transport->dst); } else transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) { struct dst_entry *dst; if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); /* Use default minimum segment size and disable * pmtu discovery on this transport. */ t->pathmtu = SCTP_DEFAULT_MINSEGMENT; } else { t->pathmtu = pmtu; } dst = sctp_transport_dst_check(t); if (!dst) t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); if (dst) { dst->ops->update_pmtu(dst, sk, NULL, pmtu); dst = sctp_transport_dst_check(t); if (!dst) t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); } } /* Caches the dst entry and source address for a transport's destination * address. 
*/ void sctp_transport_route(struct sctp_transport *transport, union sctp_addr *saddr, struct sctp_sock *opt) { struct sctp_association *asoc = transport->asoc; struct sctp_af *af = transport->af_specific; af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); if (saddr) memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); else af->get_saddr(opt, transport, &transport->fl); if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { return; } if (transport->dst) { transport->pathmtu = dst_mtu(transport->dst); /* Initialize sk->sk_rcv_saddr, if the transport is the * association's active path for getsockname(). */ if (asoc && (!asoc->peer.primary_path || (transport == asoc->peer.active_path))) opt->pf->af->to_sk_saddr(&transport->saddr, asoc->base.sk); } else transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } /* Hold a reference to a transport. */ void sctp_transport_hold(struct sctp_transport *transport) { atomic_inc(&transport->refcnt); } /* Release a reference to a transport and clean up * if there are no more references. */ void sctp_transport_put(struct sctp_transport *transport) { if (atomic_dec_and_test(&transport->refcnt)) sctp_transport_destroy(transport); } /* Update transport's RTO based on the newly calculated RTT. */ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) { if (unlikely(!tp->rto_pending)) /* We should not be doing any RTO updates unless rto_pending is set. */ pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp); if (tp->rttvar || tp->srtt) { struct net *net = sock_net(tp->asoc->base.sk); /* 6.3.1 C3) When a new RTT measurement R' is made, set * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' */ /* Note: The above algorithm has been rewritten to * express rto_beta and rto_alpha as inverse powers * of two. * For example, assuming the default value of RTO.Alpha of * 1/8, rto_alpha would be expressed as 3. 
*/ tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) + (rtt >> net->sctp.rto_alpha); } else { /* 6.3.1 C2) When the first RTT measurement R is made, set * SRTT <- R, RTTVAR <- R/2. */ tp->srtt = rtt; tp->rttvar = rtt >> 1; } /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. */ if (tp->rttvar == 0) tp->rttvar = SCTP_CLOCK_GRANULARITY; /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ tp->rto = tp->srtt + (tp->rttvar << 2); /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min * seconds then it is rounded up to RTO.Min seconds. */ if (tp->rto < tp->asoc->rto_min) tp->rto = tp->asoc->rto_min; /* 6.3.1 C7) A maximum value may be placed on RTO provided it is * at least RTO.max seconds. */ if (tp->rto > tp->asoc->rto_max) tp->rto = tp->asoc->rto_max; sctp_max_rto(tp->asoc, tp); tp->rtt = rtt; /* Reset rto_pending so that a new RTT measurement is started when a * new data chunk is sent. */ tp->rto_pending = 0; pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n", __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); } /* This routine updates the transport's cwnd and partial_bytes_acked * parameters based on the bytes acked in the received SACK. */ void sctp_transport_raise_cwnd(struct sctp_transport *transport, __u32 sack_ctsn, __u32 bytes_acked) { struct sctp_association *asoc = transport->asoc; __u32 cwnd, ssthresh, flight_size, pba, pmtu; cwnd = transport->cwnd; flight_size = transport->flight_size; /* See if we need to exit Fast Recovery first */ if (asoc->fast_recovery && TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) asoc->fast_recovery = 0; /* The appropriate cwnd increase algorithm is performed if, and only * if the cumulative TSN whould advanced and the congestion window is * being fully utilized. 
*/ if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) || (flight_size < cwnd)) return; ssthresh = transport->ssthresh; pba = transport->partial_bytes_acked; pmtu = transport->asoc->pathmtu; if (cwnd <= ssthresh) { /* RFC 4960 7.2.1 * o When cwnd is less than or equal to ssthresh, an SCTP * endpoint MUST use the slow-start algorithm to increase * cwnd only if the current congestion window is being fully * utilized, an incoming SACK advances the Cumulative TSN * Ack Point, and the data sender is not in Fast Recovery. * Only when these three conditions are met can the cwnd be * increased; otherwise, the cwnd MUST not be increased. * If these conditions are met, then cwnd MUST be increased * by, at most, the lesser of 1) the total size of the * previously outstanding DATA chunk(s) acknowledged, and * 2) the destination's path MTU. This upper bound protects * against the ACK-Splitting attack outlined in [SAVAGE99]. */ if (asoc->fast_recovery) return; if (bytes_acked > pmtu) cwnd += pmtu; else cwnd += bytes_acked; pr_debug("%s: slow start: transport:%p, bytes_acked:%d, " "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n", __func__, transport, bytes_acked, cwnd, ssthresh, flight_size, pba); } else { /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, * upon each SACK arrival that advances the Cumulative TSN Ack * Point, increase partial_bytes_acked by the total number of * bytes of all new chunks acknowledged in that SACK including * chunks acknowledged by the new Cumulative TSN Ack and by * Gap Ack Blocks. * * When partial_bytes_acked is equal to or greater than cwnd * and before the arrival of the SACK the sender had cwnd or * more bytes of data outstanding (i.e., before arrival of the * SACK, flightsize was greater than or equal to cwnd), * increase cwnd by MTU, and reset partial_bytes_acked to * (partial_bytes_acked - cwnd). */ pba += bytes_acked; if (pba >= cwnd) { cwnd += pmtu; pba = ((cwnd < pba) ? 
(pba - cwnd) : 0); } pr_debug("%s: congestion avoidance: transport:%p, " "bytes_acked:%d, cwnd:%d, ssthresh:%d, " "flight_size:%d, pba:%d\n", __func__, transport, bytes_acked, cwnd, ssthresh, flight_size, pba); } transport->cwnd = cwnd; transport->partial_bytes_acked = pba; } /* This routine is used to lower the transport's cwnd when congestion is * detected. */ void sctp_transport_lower_cwnd(struct sctp_transport *transport, sctp_lower_cwnd_t reason) { struct sctp_association *asoc = transport->asoc; switch (reason) { case SCTP_LOWER_CWND_T3_RTX: /* RFC 2960 Section 7.2.3, sctpimpguide * When the T3-rtx timer expires on an address, SCTP should * perform slow start by: * ssthresh = max(cwnd/2, 4*MTU) * cwnd = 1*MTU * partial_bytes_acked = 0 */ transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = asoc->pathmtu; /* T3-rtx also clears fast recovery */ asoc->fast_recovery = 0; break; case SCTP_LOWER_CWND_FAST_RTX: /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the * destination address(es) to which the missing DATA chunks * were last sent, according to the formula described in * Section 7.2.3. * * RFC 2960 7.2.3, sctpimpguide Upon detection of packet * losses from SACK (see Section 7.2.4), An endpoint * should do the following: * ssthresh = max(cwnd/2, 4*MTU) * cwnd = ssthresh * partial_bytes_acked = 0 */ if (asoc->fast_recovery) return; /* Mark Fast recovery */ asoc->fast_recovery = 1; asoc->fast_recovery_exit = asoc->next_tsn - 1; transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = transport->ssthresh; break; case SCTP_LOWER_CWND_ECNE: /* RFC 2481 Section 6.1.2. * If the sender receives an ECN-Echo ACK packet * then the sender knows that congestion was encountered in the * network on the path from the sender to the receiver. The * indication of congestion should be treated just as a * congestion loss in non-ECN Capable TCP. 
That is, the TCP * source halves the congestion window "cwnd" and reduces the * slow start threshold "ssthresh". * A critical condition is that TCP does not react to * congestion indications more than once every window of * data (or more loosely more than once every round-trip time). */ if (time_after(jiffies, transport->last_time_ecne_reduced + transport->rtt)) { transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = transport->ssthresh; transport->last_time_ecne_reduced = jiffies; } break; case SCTP_LOWER_CWND_INACTIVE: /* RFC 2960 Section 7.2.1, sctpimpguide * When the endpoint does not transmit data on a given * transport address, the cwnd of the transport address * should be adjusted to max(cwnd/2, 4*MTU) per RTO. * NOTE: Although the draft recommends that this check needs * to be done every RTO interval, we do it every hearbeat * interval. */ transport->cwnd = max(transport->cwnd/2, 4*asoc->pathmtu); break; } transport->partial_bytes_acked = 0; pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n", __func__, transport, reason, transport->cwnd, transport->ssthresh); } /* Apply Max.Burst limit to the congestion window: * sctpimpguide-05 2.14.2 * D) When the time comes for the sender to * transmit new DATA chunks, the protocol parameter Max.Burst MUST * first be applied to limit how many new DATA chunks may be sent. * The limit is applied by adjusting cwnd as follows: * if ((flightsize+ Max.Burst * MTU) < cwnd) * cwnd = flightsize + Max.Burst * MTU */ void sctp_transport_burst_limited(struct sctp_transport *t) { struct sctp_association *asoc = t->asoc; u32 old_cwnd = t->cwnd; u32 max_burst_bytes; if (t->burst_limited || asoc->max_burst == 0) return; max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); if (max_burst_bytes < old_cwnd) { t->cwnd = max_burst_bytes; t->burst_limited = old_cwnd; } } /* Restore the old cwnd congestion window, after the burst had it's * desired effect. 
*/
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	/* Undo sctp_transport_burst_limited(): restore the cwnd value that
	 * was saved before the Max.Burst clamp was applied.  burst_limited
	 * doubles as the "clamp is active" flag (0 = not limited).
	 */
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
	unsigned long timeout;

	/* RTO plus random jitter ... */
	timeout = t->rto + sctp_jitter(t->rto);

	/* ... plus a heartbeat interval, except for unconfirmed or
	 * potentially-failed transports which are probed more aggressively.
	 */
	if ((t->state != SCTP_UNCONFIRMED) && (t->state != SCTP_PF))
		timeout += t->hbinterval;

	/* Convert the relative delay to an absolute jiffies deadline. */
	timeout += jiffies;
	return timeout;
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer; del_timer() returning nonzero means
	 * the timer was armed, so drop the reference it held.
	 */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	/* Re-arm the timer unless the retransmit path already did; a
	 * successfully armed timer takes a reference on the transport.
	 */
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
	return;
}
gpl-2.0
yank555-lu/SGS3-JB
drivers/misc/uart_select.c
501
3615
/* * uart_sel.c - UART Selection Driver * * Copyright (C) 2009 Samsung Electronics * Kim Kyuwon <q1.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/uart_select.h> struct uart_select { struct uart_select_platform_data *pdata; struct rw_semaphore rwsem; }; static int uart_saved_state = UART_SW_PATH_NA; static ssize_t uart_select_show_state(struct device *dev, struct device_attribute *attr, char *buf) { struct uart_select *uart_sel = platform_get_drvdata(to_platform_device(dev)); int ret; int path; path = uart_sel->pdata->get_uart_switch(); down_read(&uart_sel->rwsem); uart_saved_state = path; if (path == UART_SW_PATH_NA) ret = sprintf(buf, "NA\n"); else if (path == UART_SW_PATH_CP) ret = sprintf(buf, "CP\n"); else ret = sprintf(buf, "AP\n"); up_read(&uart_sel->rwsem); return ret; } static ssize_t uart_select_store_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct uart_select *uart_sel = platform_get_drvdata(to_platform_device(dev)); struct uart_select_platform_data *pdata = uart_sel->pdata; int path; if (!count) return -EINVAL; down_write(&uart_sel->rwsem); if (!strncmp(buf, "CP", 2)) path = UART_SW_PATH_CP; else if (!strncmp(buf, "AP", 2)) path = UART_SW_PATH_AP; else { up_write(&uart_sel->rwsem); dev_err(dev, "Invalid cmd !!\n"); return -EINVAL; } pdata->set_uart_switch(path); uart_saved_state = path; up_write(&uart_sel->rwsem); return count; } static struct device_attribute uart_select_attr = { .attr = { .name = "path", .mode = 0644, }, .show = uart_select_show_state, .store = uart_select_store_state, }; /* Used in uart isr to avoid triggering sysrq when uart is not in AP */ int 
uart_sel_get_state(void) { if (uart_saved_state < 0) return -EPERM; else return uart_saved_state; } EXPORT_SYMBOL(uart_sel_get_state); static int __devinit uart_select_probe(struct platform_device *pdev) { struct uart_select *uart_sel; struct uart_select_platform_data *pdata = pdev->dev.platform_data; int ret; uart_sel = kzalloc(sizeof(struct uart_select), GFP_KERNEL); if (!uart_sel) { dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } platform_set_drvdata(pdev, uart_sel); uart_sel->pdata = pdata; init_rwsem(&uart_sel->rwsem); uart_saved_state = pdata->get_uart_switch(); ret = device_create_file(&pdev->dev, &uart_select_attr); if (ret) { dev_err(&pdev->dev, "failed to crreate device file\n"); return ret; } return 0; } static int __devexit uart_select_remove(struct platform_device *pdev) { device_remove_file(&pdev->dev, &uart_select_attr); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver uart_select_driver = { .probe = uart_select_probe, .remove = __devexit_p(uart_select_remove), .driver = { .name = "uart-select", .owner = THIS_MODULE, }, }; static int __init uart_select_init(void) { return platform_driver_register(&uart_select_driver); } module_init(uart_select_init); static void __exit uart_select_exit(void) { platform_driver_unregister(&uart_select_driver); } module_exit(uart_select_exit); MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>"); MODULE_DESCRIPTION("UART Selection Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
wwenigma/android_kernel_alcatel_cocktail
drivers/net/wireless/rt2x00/rt2x00pci.c
757
9108
/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * Register access.
 * Poll an indirect-access busy flag; returns 1 when the register became
 * ready (value in *reg), 0 on timeout or when the device is gone.
 */
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	/* Device was unplugged: fail without touching the hardware. */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00pci_register_read(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	ERROR(rt2x00dev, "Indirect register access failed: "
	      "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);

/*
 * TX data handlers.
 * Sanity check only: verifies the entry really belongs to the driver
 * before the caller writes a TX descriptor into it.
 */
int rt2x00pci_write_tx_data(struct queue_entry *entry,
			    struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 * Drain completed RX entries, stopping at the first entry the hardware
 * still owns (get_entry_state() nonzero).
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci *entry_priv;
	struct skb_frame_desc *skbdesc;

	while (1) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;
		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(rt2x00dev, entry);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

/*
 * Device initialization handlers.
 * One coherent DMA allocation per queue, carved into per-entry
 * descriptor slices below.
 */
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  &dma, GFP_KERNEL | GFP_DMA);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, queue->limit * queue->desc_size);

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	/* entry 0 holds the base of the single coherent allocation. */
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      rt2x00dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	/* Safe to free all queues: free_queue_dma tolerates NULL desc. */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	/* kfree(NULL) is a no-op, so this is safe on partial allocation. */
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);

	rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	/* MWI failure is non-fatal; just log it. */
	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	rt2x00dev->irq = pci_dev->irq;
	rt2x00dev->name = pci_name(pci_dev);

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	/* NOTE(review): -EBUSY is deliberately excluded from the disable
	 * here — presumably the device is shared in that case; confirm
	 * before changing this unwind order.
	 */
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");
gpl-2.0
Maroc-OS/Merruk-Technology
common/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
1525
9350
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>

struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;	/* protects irqsrc/irqen regs and SRAM map */
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

/* Select the IRQ condition for a queue and install its handler.
 * Queues 0-31 have a programmable source; 32-63 are fixed to
 * "not nearly empty".
 */
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

/* IRQ handler for queues 0-31 on IXP42x rev A0: the irqstat register is
 * unreliable, so re-derive the IRQ condition from irqsrc/stat1 for every
 * enabled queue.
 */
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = qmgr_regs->irqen[0];
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = qmgr_regs->irqsrc[i >> 3];
		stat = qmgr_regs->stat1[i >> 3];
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}

/* IRQ handler for queues 32-63 on IXP42x rev A0 (fixed "not nearly
 * empty" condition, read from statne_h).
 */
static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}

/* IRQ handler for non-A0 silicon: irqstat is trustworthy, dispatch
 * straight from it.
 */
static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}

void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

/* Shift the 128-bit SRAM-page mask left by one page. */
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

/* Reserve SRAM for a queue and program its config word.  First-fit scan
 * of the 128-page bitmap; takes a module reference while the queue is
 * held.  Returns 0 or a -errno.
 */
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	switch (len) {
	case  16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case  32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case  64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	/* nonzero SRAM config word means the queue is already taken */
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

/* Release a queue: rebuild its SRAM mask from the stored config word,
 * clear the config and bitmap bits, then drain and report any entries
 * the owner left behind.
 */
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr);		/* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif
	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);

	/* drain leftover entries outside the lock; a non-empty queue at
	 * release time indicates a bug in the queue's owner
	 */
	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);
}

static int qmgr_init(void)
{
	int i, err;
	irq_handler_t handler1, handler2;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	if (qmgr_regs == NULL) {
		err = -ENOMEM;
		goto error_map;
	}

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}
	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);
	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	/* rev A0 silicon needs the workaround handlers (see above) */
	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM1, err);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM2, err);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	iounmap(qmgr_regs);
error_map:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}

static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	iounmap(qmgr_regs);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);
gpl-2.0
stevegaron/android-kernel-tuna
drivers/staging/msm/mdp4_debugfs.c
2293
3953
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/clk.h> #include <mach/hardware.h> #include <linux/io.h> #include <linux/debugfs.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include "mdp.h" #include "msm_fb.h" #include "mdp4.h" #define MDP4_DEBUG_BUF 128 static char mdp4_debug_buf[MDP4_DEBUG_BUF]; static ulong mdp4_debug_offset; static ulong mdp4_base_addr; static int mdp4_offset_set(void *data, u64 val) { mdp4_debug_offset = (int)val; return 0; } static int mdp4_offset_get(void *data, u64 *val) { *val = (u64)mdp4_debug_offset; return 0; } DEFINE_SIMPLE_ATTRIBUTE( mdp4_offset_fops, mdp4_offset_get, mdp4_offset_set, "%llx\n"); static int mdp4_debugfs_release(struct inode *inode, struct file *file) { return 0; } static ssize_t mdp4_debugfs_write( struct file *file, const char __user *buff, size_t count, loff_t *ppos) { int cnt; unsigned int data; printk(KERN_INFO "%s: offset=%d count=%d *ppos=%d\n", __func__, (int)mdp4_debug_offset, (int)count, (int)*ppos); if (count > sizeof(mdp4_debug_buf)) return -EFAULT; if 
(copy_from_user(mdp4_debug_buf, buff, count)) return -EFAULT; mdp4_debug_buf[count] = 0; /* end of string */ cnt = sscanf(mdp4_debug_buf, "%x", &data); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } writel(&data, mdp4_base_addr + mdp4_debug_offset); return 0; } static ssize_t mdp4_debugfs_read( struct file *file, char __user *buff, size_t count, loff_t *ppos) { int len = 0; unsigned int data; printk(KERN_INFO "%s: offset=%d count=%d *ppos=%d\n", __func__, (int)mdp4_debug_offset, (int)count, (int)*ppos); if (*ppos) return 0; /* the end */ data = readl(mdp4_base_addr + mdp4_debug_offset); len = snprintf(mdp4_debug_buf, 4, "%x\n", data); if (len > 0) { if (len > count) len = count; if (copy_to_user(buff, mdp4_debug_buf, len)) return -EFAULT; } printk(KERN_INFO "%s: len=%d\n", __func__, len); if (len < 0) return 0; *ppos += len; /* increase offset */ return len; } static const struct file_operations mdp4_debugfs_fops = { .open = nonseekable_open, .release = mdp4_debugfs_release, .read = mdp4_debugfs_read, .write = mdp4_debugfs_write, .llseek = no_llseek, }; int mdp4_debugfs_init(void) { struct dentry *dent = debugfs_create_dir("mdp4", NULL); if (IS_ERR(dent)) { printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n", __FILE__, __LINE__, PTR_ERR(dent)); return -1; } if (debugfs_create_file("offset", 0644, dent, 0, &mdp4_offset_fops) == NULL) { printk(KERN_ERR "%s(%d): debugfs_create_file: offset fail\n", __FILE__, __LINE__); return -1; } if (debugfs_create_file("regs", 0644, dent, 0, &mdp4_debugfs_fops) == NULL) { printk(KERN_ERR "%s(%d): debugfs_create_file: regs fail\n", __FILE__, __LINE__); return -1; } mdp4_debug_offset = 0; mdp4_base_addr = (ulong) msm_mdp_base; /* defined at msm_fb_def.h */ return 0; }
gpl-2.0
Ironjim41/angler_kernel
drivers/staging/winbond/phy_calibration.c
2293
50106
/* * phy_302_calibration.c * * Copyright (C) 2002, 2005 Winbond Electronics Corp. * * modification history * --------------------------------------------------------------------------- * 0.01.001, 2003-04-16, Kevin created * */ /****************** INCLUDE FILES SECTION ***********************************/ #include "phy_calibration.h" #include "wbhal.h" #include "wb35reg_f.h" #include "core.h" /****************** DEBUG CONSTANT AND MACRO SECTION ************************/ /****************** LOCAL CONSTANT AND MACRO SECTION ************************/ #define LOOP_TIMES 20 #define US 1000/* MICROSECOND*/ #define AG_CONST 0.6072529350 #define FIXED(X) ((s32)((X) * 32768.0)) #define DEG2RAD(X) (0.017453 * (X)) static const s32 Angles[] = { FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)), FIXED(DEG2RAD(14.0362)), FIXED(DEG2RAD(7.12502)), FIXED(DEG2RAD(3.57633)), FIXED(DEG2RAD(1.78991)), FIXED(DEG2RAD(0.895174)), FIXED(DEG2RAD(0.447614)), FIXED(DEG2RAD(0.223811)), FIXED(DEG2RAD(0.111906)), FIXED(DEG2RAD(0.055953)), FIXED(DEG2RAD(0.027977)) }; /****************** LOCAL FUNCTION DECLARATION SECTION **********************/ /* * void _phy_rf_write_delay(struct hw_data *phw_data); * void phy_init_rf(struct hw_data *phw_data); */ /****************** FUNCTION DEFINITION SECTION *****************************/ s32 _s13_to_s32(u32 data) { u32 val; val = (data & 0x0FFF); if ((data & BIT(12)) != 0) val |= 0xFFFFF000; return (s32) val; } u32 _s32_to_s13(s32 data) { u32 val; if (data > 4095) data = 4095; else if (data < -4096) data = -4096; val = data & 0x1FFF; return val; } /****************************************************************************/ s32 _s4_to_s32(u32 data) { s32 val; val = (data & 0x0007); if ((data & BIT(3)) != 0) val |= 0xFFFFFFF8; return val; } u32 _s32_to_s4(s32 data) { u32 val; if (data > 7) data = 7; else if (data < -8) data = -8; val = data & 0x000F; return val; } /****************************************************************************/ s32 _s5_to_s32(u32 
data) { s32 val; val = (data & 0x000F); if ((data & BIT(4)) != 0) val |= 0xFFFFFFF0; return val; } u32 _s32_to_s5(s32 data) { u32 val; if (data > 15) data = 15; else if (data < -16) data = -16; val = data & 0x001F; return val; } /****************************************************************************/ s32 _s6_to_s32(u32 data) { s32 val; val = (data & 0x001F); if ((data & BIT(5)) != 0) val |= 0xFFFFFFE0; return val; } u32 _s32_to_s6(s32 data) { u32 val; if (data > 31) data = 31; else if (data < -32) data = -32; val = data & 0x003F; return val; } /****************************************************************************/ s32 _s9_to_s32(u32 data) { s32 val; val = data & 0x00FF; if ((data & BIT(8)) != 0) val |= 0xFFFFFF00; return val; } u32 _s32_to_s9(s32 data) { u32 val; if (data > 255) data = 255; else if (data < -256) data = -256; val = data & 0x01FF; return val; } /****************************************************************************/ s32 _floor(s32 n) { if (n > 0) n += 5; else n -= 5; return n/10; } /****************************************************************************/ /* * The following code is sqare-root function. 
* sqsum is the input and the output is sq_rt; * The maximum of sqsum = 2^27 -1; */ u32 _sqrt(u32 sqsum) { u32 sq_rt; int g0, g1, g2, g3, g4; int seed; int next; int step; g4 = sqsum / 100000000; g3 = (sqsum - g4*100000000) / 1000000; g2 = (sqsum - g4*100000000 - g3*1000000) / 10000; g1 = (sqsum - g4*100000000 - g3*1000000 - g2*10000) / 100; g0 = (sqsum - g4*100000000 - g3*1000000 - g2*10000 - g1*100); next = g4; step = 0; seed = 0; while (((seed+1)*(step+1)) <= next) { step++; seed++; } sq_rt = seed * 10000; next = (next-(seed*step))*100 + g3; step = 0; seed = 2 * seed * 10; while (((seed+1)*(step+1)) <= next) { step++; seed++; } sq_rt = sq_rt + step * 1000; next = (next - seed * step) * 100 + g2; seed = (seed + step) * 10; step = 0; while (((seed+1)*(step+1)) <= next) { step++; seed++; } sq_rt = sq_rt + step * 100; next = (next - seed * step) * 100 + g1; seed = (seed + step) * 10; step = 0; while (((seed+1)*(step+1)) <= next) { step++; seed++; } sq_rt = sq_rt + step * 10; next = (next - seed * step) * 100 + g0; seed = (seed + step) * 10; step = 0; while (((seed+1)*(step+1)) <= next) { step++; seed++; } sq_rt = sq_rt + step; return sq_rt; } /****************************************************************************/ void _sin_cos(s32 angle, s32 *sin, s32 *cos) { s32 X, Y, TargetAngle, CurrAngle; unsigned Step; X = FIXED(AG_CONST); /* AG_CONST * cos(0) */ Y = 0; /* AG_CONST * sin(0) */ TargetAngle = abs(angle); CurrAngle = 0; for (Step = 0; Step < 12; Step++) { s32 NewX; if (TargetAngle > CurrAngle) { NewX = X - (Y >> Step); Y = (X >> Step) + Y; X = NewX; CurrAngle += Angles[Step]; } else { NewX = X + (Y >> Step); Y = -(X >> Step) + Y; X = NewX; CurrAngle -= Angles[Step]; } } if (angle > 0) { *cos = X; *sin = Y; } else { *cos = X; *sin = -Y; } } static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 * pValue) { if (number < 0x1000) number += 0x1000; return Wb35Reg_ReadSync(pHwData, number, pValue); } #define hw_get_dxx_reg(_A, _B, _C) 
hal_get_dxx_reg(_A, _B, (u32 *)_C) static unsigned char hal_set_dxx_reg(struct hw_data *pHwData, u16 number, u32 value) { unsigned char ret; if (number < 0x1000) number += 0x1000; ret = Wb35Reg_WriteSync(pHwData, number, value); return ret; } #define hw_set_dxx_reg(_A, _B, _C) hal_set_dxx_reg(_A, _B, (u32)_C) void _reset_rx_cal(struct hw_data *phw_data) { u32 val; hw_get_dxx_reg(phw_data, 0x54, &val); if (phw_data->revision == 0x2002) /* 1st-cut */ val &= 0xFFFF0000; else /* 2nd-cut */ val &= 0x000003FF; hw_set_dxx_reg(phw_data, 0x54, val); } /**************for winbond calibration*********/ /**********************************************/ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequency) { u32 reg_agc_ctrl3; u32 reg_a_acq_ctrl; u32 reg_b_acq_ctrl; u32 val; PHY_DEBUG(("[CAL] -> [1]_rxadc_dc_offset_cancellation()\n")); phy_init_rf(phw_data); /* set calibration channel */ if ((RF_WB_242 == phw_data->phy_type) || (RF_WB_242_1 == phw_data->phy_type)) /* 20060619.5 Add */{ if ((frequency >= 2412) && (frequency <= 2484)) { /* w89rf242 change frequency to 2390Mhz */ PHY_DEBUG(("[CAL] W89RF242/11G/Channel=2390Mhz\n")); phy_set_rf_data(phw_data, 3, (3<<24)|0x025586); } } else { } /* reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel */ hw_get_dxx_reg(phw_data, 0x5C, &val); val &= ~(0x03FF); hw_set_dxx_reg(phw_data, 0x5C, val); /* reset the TX and RX IQ calibration data */ hw_set_dxx_reg(phw_data, 0x3C, 0); hw_set_dxx_reg(phw_data, 0x54, 0); hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */ /* a. Disable AGC */ hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3); reg_agc_ctrl3 &= ~BIT(2); reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX); hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3); hw_get_dxx_reg(phw_data, REG_AGC_CTRL5, &val); val |= MASK_AGC_FIX_GAIN; hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val); /* b. 
Turn off BB RX */
	hw_get_dxx_reg(phw_data, REG_A_ACQ_CTRL, &reg_a_acq_ctrl);
	reg_a_acq_ctrl |= MASK_AMER_OFF_REG;
	hw_set_dxx_reg(phw_data, REG_A_ACQ_CTRL, reg_a_acq_ctrl);

	hw_get_dxx_reg(phw_data, REG_B_ACQ_CTRL, &reg_b_acq_ctrl);
	reg_b_acq_ctrl |= MASK_BMER_OFF_REG;
	hw_set_dxx_reg(phw_data, REG_B_ACQ_CTRL, reg_b_acq_ctrl);

	/* c. Make sure MAC is in receiving mode
	 * d. Turn ON ADC calibration
	 *    - ADC calibrator is triggered by this signal rising from 0 to 1 */
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val);
	val &= ~MASK_ADC_DC_CAL_STR;	/* force the trigger low first ... */
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, val);
	val |= MASK_ADC_DC_CAL_STR;	/* ... then raise it: 0 -> 1 edge */
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, val);

	/* e. The results are shown in "adc_dc_cal_i[8:0] and adc_dc_cal_q[8:0]" */
#ifdef _DEBUG
	hw_get_dxx_reg(phw_data, REG_OFFSET_READ, &val);
	PHY_DEBUG(("[CAL] REG_OFFSET_READ = 0x%08X\n", val));
	PHY_DEBUG(("[CAL] ** adc_dc_cal_i = %d (0x%04X)\n",
		_s9_to_s32(val&0x000001FF), val&0x000001FF));
	PHY_DEBUG(("[CAL] ** adc_dc_cal_q = %d (0x%04X)\n",
		_s9_to_s32((val&0x0003FE00)>>9), (val&0x0003FE00)>>9));
#endif

	/* drop the trigger again so the next calibration sees a fresh edge */
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val);
	val &= ~MASK_ADC_DC_CAL_STR;
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, val);

	/* f. Turn on BB RX */
	/* hw_get_dxx_reg(phw_data, REG_A_ACQ_CTRL, &reg_a_acq_ctrl); */
	reg_a_acq_ctrl &= ~MASK_AMER_OFF_REG;
	hw_set_dxx_reg(phw_data, REG_A_ACQ_CTRL, reg_a_acq_ctrl);

	/* hw_get_dxx_reg(phw_data, REG_B_ACQ_CTRL, &reg_b_acq_ctrl); */
	reg_b_acq_ctrl &= ~MASK_BMER_OFF_REG;
	hw_set_dxx_reg(phw_data, REG_B_ACQ_CTRL, reg_b_acq_ctrl);

	/* g.
Enable AGC */
	/* hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &val); */
	reg_agc_ctrl3 |= BIT(2);
	reg_agc_ctrl3 &= ~(MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);
}

/****************************************************************/
/*
 * Step [2] of the calibration sequence: cancel the TX I-DAC DC offset.
 * Puts the RF into TX-calibration loopback, then iteratively probes the
 * DC_Cancel register (0x5C) and measures the resulting image magnitude
 * via REG_CALIB_READ2 until the change is large enough (see the *6 test).
 */
void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
{
	u32 reg_agc_ctrl3;
	u32 reg_mode_ctrl;
	u32 reg_dc_cancel;
	s32 iqcal_image_i;
	s32 iqcal_image_q;
	u32 sqsum;
	s32 mag_0;	/* image magnitude with cancel_dc_i cleared */
	s32 mag_1;	/* image magnitude with cancel_dc_i probe bit set */
	s32 fix_cancel_dc_i = 0;
	u32 val;
	int loop;

	PHY_DEBUG(("[CAL] -> [2]_txidac_dc_offset_cancellation()\n"));

	/* a. Set to "TX calibration mode" */
	/* 0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
	phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
	/* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
	phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
	/* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
	phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
	/* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
	phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
	/* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
	phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);

	hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */

	/* a. Disable AGC */
	hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
	reg_agc_ctrl3 &= ~BIT(2);
	reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);

	hw_get_dxx_reg(phw_data, REG_AGC_CTRL5, &val);
	val |= MASK_AGC_FIX_GAIN;
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);

	/* b.
set iqcal_mode[1:0] to 0x2 and set iqcal_tone[3:2] to 0 */
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
	reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);

	/* mode=2, tone=0 */
	/* reg_mode_ctrl |= (MASK_CALIB_START|2); */
	/* mode=2, tone=1 */
	/* reg_mode_ctrl |= (MASK_CALIB_START|2|(1<<2)); */
	/* mode=2, tone=2 */
	reg_mode_ctrl |= (MASK_CALIB_START|2|(2<<2));
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

	hw_get_dxx_reg(phw_data, 0x5C, &reg_dc_cancel);
	PHY_DEBUG(("[CAL] DC_CANCEL (read) = 0x%08X\n", reg_dc_cancel));

	for (loop = 0; loop < LOOP_TIMES; loop++) {
		PHY_DEBUG(("[CAL] [%d.] ==================================\n", loop));

		/* c. reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel */
		reg_dc_cancel &= ~(0x03FF);
		PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
		hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);

		hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
		PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));

		/* image vector is packed as two signed 13-bit fields */
		iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
		iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
		sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
		mag_0 = (s32) _sqrt(sqsum);
		PHY_DEBUG(("[CAL] mag_0=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
			mag_0, iqcal_image_i, iqcal_image_q));

		/* d. */
		reg_dc_cancel |= (1 << CANCEL_DC_I_SHIFT);
		PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
		hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);

		hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
		PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));

		iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
		iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
		sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
		mag_1 = (s32) _sqrt(sqsum);
		PHY_DEBUG(("[CAL] mag_1=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
			mag_1, iqcal_image_i, iqcal_image_q));

		/* e.
Calculate the correct DC offset cancellation value for I */
		/* NOTE(review): the *10000 factors cancel in integer division,
		 * and the inner `if (mag_0 == mag_1)` inside the else branch is
		 * always true — both look like leftovers from an earlier
		 * formula.  Left untouched; behavior is unchanged either way. */
		if (mag_0 != mag_1)
			fix_cancel_dc_i = (mag_0*10000) / (mag_0*10000 - mag_1*10000);
		else {
			if (mag_0 == mag_1)
				PHY_DEBUG(("[CAL] ***** mag_0 = mag_1 !!\n"));
			fix_cancel_dc_i = 0;
		}

		PHY_DEBUG(("[CAL] ** fix_cancel_dc_i = %d (0x%04X)\n",
			fix_cancel_dc_i, _s32_to_s5(fix_cancel_dc_i)));

		/* stop as soon as the probe moved the image magnitude by >1/6 */
		if ((abs(mag_1-mag_0)*6) > mag_0)
			break;
	}

	/* NOTE(review): 19 is presumably LOOP_TIMES-1 (ran to completion
	 * without converging) — confirm against the LOOP_TIMES definition. */
	if (loop >= 19)
		fix_cancel_dc_i = 0;

	reg_dc_cancel &= ~(0x03FF);
	reg_dc_cancel |= (_s32_to_s5(fix_cancel_dc_i) << CANCEL_DC_I_SHIFT);
	hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
	PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));

	/* g. */
	reg_mode_ctrl &= ~MASK_CALIB_START;
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
}

/*****************************************************/
/*
 * Step [3]: cancel the TX Q-DAC DC offset.  Mirrors step [2] but probes
 * the cancel_dc_q[4:0] field and uses iqcal_mode 0x3.
 * (Function name preserves the original's "cacellation" typo — it is the
 * exported identifier, so callers depend on it.)
 */
void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
{
	u32 reg_agc_ctrl3;
	u32 reg_mode_ctrl;
	u32 reg_dc_cancel;
	s32 iqcal_image_i;
	s32 iqcal_image_q;
	u32 sqsum;
	s32 mag_0;
	s32 mag_1;
	s32 fix_cancel_dc_q = 0;
	u32 val;
	int loop;

	PHY_DEBUG(("[CAL] -> [3]_txqdac_dc_offset_cacellation()\n"));

	/*0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
	phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
	/* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
	phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
	/* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
	phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
	/* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
	phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
	/* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
	phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);

	hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */

	/* a.
Disable AGC */
	hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
	reg_agc_ctrl3 &= ~BIT(2);
	reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);

	hw_get_dxx_reg(phw_data, REG_AGC_CTRL5, &val);
	val |= MASK_AGC_FIX_GAIN;
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);

	/* a. set iqcal_mode[1:0] to 0x3 and set iqcal_tone[3:2] to 0 */
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
	/* reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE); */
	reg_mode_ctrl &= ~(MASK_IQCAL_MODE);
	reg_mode_ctrl |= (MASK_CALIB_START|3);
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

	hw_get_dxx_reg(phw_data, 0x5C, &reg_dc_cancel);
	PHY_DEBUG(("[CAL] DC_CANCEL (read) = 0x%08X\n", reg_dc_cancel));

	for (loop = 0; loop < LOOP_TIMES; loop++) {
		PHY_DEBUG(("[CAL] [%d.] ==================================\n", loop));

		/* b. reset cancel_dc_q[4:0] in register DC_Cancel */
		reg_dc_cancel &= ~(0x001F);
		PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
		hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);

		hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
		PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
		/* image vector: two signed 13-bit fields packed into val */
		iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
		iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
		sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
		mag_0 = _sqrt(sqsum);	/* baseline image magnitude */
		PHY_DEBUG(("[CAL] mag_0=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
			mag_0, iqcal_image_i, iqcal_image_q));

		/* c.
*/
		/* probe: set one LSB of cancel_dc_q and re-measure */
		reg_dc_cancel |= (1 << CANCEL_DC_Q_SHIFT);
		PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
		hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);

		hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
		PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
		iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
		iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
		sqsum = iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q;
		mag_1 = _sqrt(sqsum);
		PHY_DEBUG(("[CAL] mag_1=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
			mag_1, iqcal_image_i, iqcal_image_q));

		/* d. Calculate the correct DC offset cancellation value for I */
		/* NOTE(review): same leftovers as the I-DAC variant — the
		 * *10000 factors cancel and the inner equality re-test is
		 * always true.  Preserved verbatim. */
		if (mag_0 != mag_1)
			fix_cancel_dc_q = (mag_0*10000) / (mag_0*10000 - mag_1*10000);
		else {
			if (mag_0 == mag_1)
				PHY_DEBUG(("[CAL] ***** mag_0 = mag_1 !!\n"));
			fix_cancel_dc_q = 0;
		}

		PHY_DEBUG(("[CAL] ** fix_cancel_dc_q = %d (0x%04X)\n",
			fix_cancel_dc_q, _s32_to_s5(fix_cancel_dc_q)));

		/* converged when the probe moved the magnitude by >1/6 */
		if ((abs(mag_1-mag_0)*6) > mag_0)
			break;
	}

	/* NOTE(review): 19 presumably tracks LOOP_TIMES-1 — confirm */
	if (loop >= 19)
		fix_cancel_dc_q = 0;

	reg_dc_cancel &= ~(0x001F);
	reg_dc_cancel |= (_s32_to_s5(fix_cancel_dc_q) << CANCEL_DC_Q_SHIFT);
	hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
	PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));

	/* f.
*/
	reg_mode_ctrl &= ~MASK_CALIB_START;
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
}

/* 20060612.1.a 20060718.1 Modify */
/*
 * One pass of the TX IQ-imbalance calibration loop.  Measures the TX tone
 * and its image, derives the residual gain (EPSILON/2) and phase (THETA/2)
 * errors, and folds a correction into register 0x54 (1st-cut) or 0x3C
 * (2nd-cut).  Returns 0 on success/convergence, 1 on failure (saturation
 * or a degenerate measurement).
 */
u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data, s32 a_2_threshold, s32 b_2_threshold)
{
	u32 reg_mode_ctrl;
	s32 iq_mag_0_tx;	/* magnitude of the reference tone */
	s32 iqcal_tone_i0;
	s32 iqcal_tone_q0;
	s32 iqcal_tone_i;
	s32 iqcal_tone_q;
	u32 sqsum;
	s32 rot_i_b;
	s32 rot_q_b;
	s32 tx_cal_flt_b[4];
	s32 tx_cal[4];
	s32 tx_cal_reg[4];
	s32 a_2, b_2;	/* EPSILON/2 (gain) and THETA/2 (phase) estimates */
	s32 sin_b, sin_2b;
	s32 cos_b, cos_2b;
	s32 divisor;
	s32 temp1, temp2;
	u32 val;
	u16 loop;
	s32 iqcal_tone_i_avg, iqcal_tone_q_avg;
	u8 verify_count;	/* consecutive in-threshold measurements */
	int capture_time;

	PHY_DEBUG(("[CAL] -> _tx_iq_calibration_loop()\n"));
	PHY_DEBUG(("[CAL] ** a_2_threshold = %d\n", a_2_threshold));
	PHY_DEBUG(("[CAL] ** b_2_threshold = %d\n", b_2_threshold));

	verify_count = 0;

	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));

	loop = LOOP_TIMES;

	while (loop > 0) {
		PHY_DEBUG(("[CAL] [%d.] <_tx_iq_calibration_loop>\n", (LOOP_TIMES-loop+1)));
		iqcal_tone_i_avg = 0;
		iqcal_tone_q_avg = 0;
		if (!hw_set_dxx_reg(phw_data, 0x3C, 0x00)) /* 20060718.1 modify */
			return 0;
		/* average ten captures to smooth out measurement noise */
		for (capture_time = 0; capture_time < 10; capture_time++) {
			/*
			 * a. Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to
			 * enable "IQ calibration Mode II"
			 */
			/* NOTE(review): the first two and last two statements
			 * below are pairwise redundant (the second of each pair
			 * subsumes the first) — kept verbatim. */
			reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
			reg_mode_ctrl &= ~MASK_IQCAL_MODE;
			reg_mode_ctrl |= (MASK_CALIB_START|0x02);
			reg_mode_ctrl |= (MASK_CALIB_START|0x02|2<<2);
			hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
			PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

			/* b.
*/
			hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
			PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));

			iqcal_tone_i0 = _s13_to_s32(val & 0x00001FFF);
			iqcal_tone_q0 = _s13_to_s32((val & 0x03FFE000) >> 13);
			PHY_DEBUG(("[CAL] ** iqcal_tone_i0=%d, iqcal_tone_q0=%d\n",
				iqcal_tone_i0, iqcal_tone_q0));

			sqsum = iqcal_tone_i0*iqcal_tone_i0 +
				iqcal_tone_q0*iqcal_tone_q0;
			iq_mag_0_tx = (s32) _sqrt(sqsum);
			PHY_DEBUG(("[CAL] ** iq_mag_0_tx=%d\n", iq_mag_0_tx));

			/* c. Set "calib_start" to 0x0 */
			reg_mode_ctrl &= ~MASK_CALIB_START;
			hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
			PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

			/*
			 * d. Set iqcal_mode[1:0] to 0x3 and set "calib_start" to 0x1 to
			 * enable "IQ calibration Mode II"
			 */
			/* hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val); */
			hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
			reg_mode_ctrl &= ~MASK_IQCAL_MODE;
			reg_mode_ctrl |= (MASK_CALIB_START|0x03);
			hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
			PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

			/* e. */
			hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
			PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));

			iqcal_tone_i = _s13_to_s32(val & 0x00001FFF);
			iqcal_tone_q = _s13_to_s32((val & 0x03FFE000) >> 13);
			PHY_DEBUG(("[CAL] ** iqcal_tone_i = %d, iqcal_tone_q = %d\n",
				iqcal_tone_i, iqcal_tone_q));
			/* NOTE(review): the first capture is deliberately
			 * discarded; the running average starts at sample 1. */
			if (capture_time == 0)
				continue;
			else {
				iqcal_tone_i_avg = (iqcal_tone_i_avg*(capture_time-1)
						+ iqcal_tone_i)/capture_time;
				iqcal_tone_q_avg = (iqcal_tone_q_avg*(capture_time-1)
						+ iqcal_tone_q)/capture_time;
			}
		}

		iqcal_tone_i = iqcal_tone_i_avg;
		iqcal_tone_q = iqcal_tone_q_avg;

		/* rotate the averaged vector into the reference tone's frame */
		rot_i_b = (iqcal_tone_i * iqcal_tone_i0 +
			iqcal_tone_q * iqcal_tone_q0) / 1024;
		rot_q_b = (iqcal_tone_i * iqcal_tone_q0 * (-1) +
			iqcal_tone_q * iqcal_tone_i0) / 1024;
		PHY_DEBUG(("[CAL] ** rot_i_b = %d, rot_q_b = %d\n",
			rot_i_b, rot_q_b));

		/* f.
*/
		divisor = ((iq_mag_0_tx * iq_mag_0_tx * 2)/1024 - rot_i_b) * 2;

		if (divisor == 0) {
			PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> ERROR *******\n"));
			PHY_DEBUG(("[CAL] ** divisor=0 to calculate EPS and THETA !!\n"));
			PHY_DEBUG(("[CAL] ******************************************\n"));
			break;
		}

		/* residual gain/phase errors in Q15 half-units */
		a_2 = (rot_i_b * 32768) / divisor;
		b_2 = (rot_q_b * (-32768)) / divisor;
		PHY_DEBUG(("[CAL] ***** EPSILON/2 = %d\n", a_2));
		PHY_DEBUG(("[CAL] ***** THETA/2 = %d\n", b_2));

		/* remember the TX residuals; the RX loop subtracts them later */
		phw_data->iq_rsdl_gain_tx_d2 = a_2;
		phw_data->iq_rsdl_phase_tx_d2 = b_2;

		/* if ((abs(a_2) < 150) && (abs(b_2) < 100)) */
		/* if ((abs(a_2) < 200) && (abs(b_2) < 200)) */
		if ((abs(a_2) < a_2_threshold) && (abs(b_2) < b_2_threshold)) {
			verify_count++;
			PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *************\n"));
			PHY_DEBUG(("[CAL] ** VERIFY OK # %d !!\n", verify_count));
			PHY_DEBUG(("[CAL] ******************************************\n"));
			/* three consecutive in-threshold passes = converged */
			if (verify_count > 2) {
				PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *********\n"));
				PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION (EPS,THETA) OK !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				return 0;
			}
			continue;
		} else
			verify_count = 0;

		_sin_cos(b_2, &sin_b, &cos_b);
		_sin_cos(b_2*2, &sin_2b, &cos_2b);
		PHY_DEBUG(("[CAL] ** sin(b/2)=%d, cos(b/2)=%d\n", sin_b, cos_b));
		PHY_DEBUG(("[CAL] ** sin(b)=%d, cos(b)=%d\n", sin_2b, cos_2b));

		if (cos_2b == 0) {
			PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> ERROR *******\n"));
			PHY_DEBUG(("[CAL] ** cos(b)=0 !!\n"));
			PHY_DEBUG(("[CAL] ******************************************\n"));
			break;
		}

		/* 1280 * 32768 = 41943040 */
		temp1 = (41943040/cos_2b)*cos_b;

		/* temp2 = (41943040/cos_2b)*sin_b*(-1); */
		if (phw_data->revision == 0x2002) /* 1st-cut */
			temp2 = (41943040/cos_2b)*sin_b*(-1);
		else /* 2nd-cut */
			temp2 = (41943040*4/cos_2b)*sin_b*(-1);

		/* compensation matrix entries, scaled by (1 +/- EPSILON/2) */
		tx_cal_flt_b[0] = _floor(temp1/(32768+a_2));
		tx_cal_flt_b[1] = _floor(temp2/(32768+a_2));
		tx_cal_flt_b[2] = _floor(temp2/(32768-a_2));
		tx_cal_flt_b[3] = _floor(temp1/(32768-a_2));
		PHY_DEBUG(("[CAL] ** tx_cal_flt_b[0] = %d\n", tx_cal_flt_b[0]));
		PHY_DEBUG(("[CAL] tx_cal_flt_b[1] = %d\n", tx_cal_flt_b[1]));
		PHY_DEBUG(("[CAL] tx_cal_flt_b[2] = %d\n", tx_cal_flt_b[2]));
		PHY_DEBUG(("[CAL] tx_cal_flt_b[3] = %d\n", tx_cal_flt_b[3]));

		tx_cal[2] = tx_cal_flt_b[2];
		tx_cal[2] = tx_cal[2] + 3;
		tx_cal[1] = tx_cal[2];
		tx_cal[3] = tx_cal_flt_b[3] - 128;
		tx_cal[0] = -tx_cal[3] + 1;
		PHY_DEBUG(("[CAL] tx_cal[0] = %d\n", tx_cal[0]));
		PHY_DEBUG(("[CAL] tx_cal[1] = %d\n", tx_cal[1]));
		PHY_DEBUG(("[CAL] tx_cal[2] = %d\n", tx_cal[2]));
		PHY_DEBUG(("[CAL] tx_cal[3] = %d\n", tx_cal[3]));

		/* if ((tx_cal[0] == 0) && (tx_cal[1] == 0) &&
			(tx_cal[2] == 0) && (tx_cal[3] == 0)) { */
		/* PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *************\n"));
		 * PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION COMPLETE !!\n"));
		 * PHY_DEBUG(("[CAL] ******************************************\n"));
		 * return 0;
		   } */

		/* g. */
		/* read back the correction currently programmed in hardware */
		if (phw_data->revision == 0x2002) /* 1st-cut */{
			hw_get_dxx_reg(phw_data, 0x54, &val);
			PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));
			tx_cal_reg[0] = _s4_to_s32((val & 0xF0000000) >> 28);
			tx_cal_reg[1] = _s4_to_s32((val & 0x0F000000) >> 24);
			tx_cal_reg[2] = _s4_to_s32((val & 0x00F00000) >> 20);
			tx_cal_reg[3] = _s4_to_s32((val & 0x000F0000) >> 16);
		} else /* 2nd-cut */{
			hw_get_dxx_reg(phw_data, 0x3C, &val);
			PHY_DEBUG(("[CAL] ** 0x3C = 0x%08X\n", val));
			tx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
			tx_cal_reg[1] = _s6_to_s32((val & 0x07E00000) >> 21);
			tx_cal_reg[2] = _s6_to_s32((val & 0x001F8000) >> 15);
			tx_cal_reg[3] = _s5_to_s32((val & 0x00007C00) >> 10);
		}

		PHY_DEBUG(("[CAL] ** tx_cal_reg[0] = %d\n", tx_cal_reg[0]));
		PHY_DEBUG(("[CAL] tx_cal_reg[1] = %d\n", tx_cal_reg[1]));
		PHY_DEBUG(("[CAL] tx_cal_reg[2] = %d\n", tx_cal_reg[2]));
		PHY_DEBUG(("[CAL] tx_cal_reg[3] = %d\n", tx_cal_reg[3]));

		/* bail out if the correction fields are already saturated */
		if (phw_data->revision == 0x2002) /* 1st-cut */{
			if (((tx_cal_reg[0] == 7) || (tx_cal_reg[0] == (-8))) &&
				((tx_cal_reg[3] == 7) || (tx_cal_reg[3] == (-8)))) {
				PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *********\n"));
				PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION SATUATION !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				break;
			}
		} else /* 2nd-cut */{
			if (((tx_cal_reg[0] == 31) || (tx_cal_reg[0] == (-32))) &&
				((tx_cal_reg[3] == 31) || (tx_cal_reg[3] == (-32)))) {
				PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *********\n"));
				PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION SATUATION !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				break;
			}
		}

		/* accumulate the new correction onto what is already applied */
		tx_cal[0] = tx_cal[0] + tx_cal_reg[0];
		tx_cal[1] = tx_cal[1] + tx_cal_reg[1];
		tx_cal[2] = tx_cal[2] + tx_cal_reg[2];
		tx_cal[3] = tx_cal[3] + tx_cal_reg[3];
		PHY_DEBUG(("[CAL] ** apply tx_cal[0] = %d\n", tx_cal[0]));
		PHY_DEBUG(("[CAL] apply tx_cal[1] = %d\n", tx_cal[1]));
		PHY_DEBUG(("[CAL] apply tx_cal[2] = %d\n", tx_cal[2]));
		PHY_DEBUG(("[CAL] apply tx_cal[3] = %d\n", tx_cal[3]));

		if (phw_data->revision == 0x2002) /* 1st-cut */{
			val &= 0x0000FFFF;
			val |= ((_s32_to_s4(tx_cal[0]) << 28)|
				(_s32_to_s4(tx_cal[1]) << 24)|
				(_s32_to_s4(tx_cal[2]) << 20)|
				(_s32_to_s4(tx_cal[3]) << 16));
			hw_set_dxx_reg(phw_data, 0x54, val);
			PHY_DEBUG(("[CAL] ** CALIB_DATA = 0x%08X\n", val));
			return 0;
		} else /* 2nd-cut */{
			val &= 0x000003FF;
			val |= ((_s32_to_s5(tx_cal[0]) << 27)|
				(_s32_to_s6(tx_cal[1]) << 21)|
				(_s32_to_s6(tx_cal[2]) << 15)|
				(_s32_to_s5(tx_cal[3]) << 10));
			hw_set_dxx_reg(phw_data, 0x3C, val);
			PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION = 0x%08X\n", val));
			return 0;
		}

		/* NOTE(review): both branches above return, so step "i." and
		 * the loop-- below are unreachable — the while loop never
		 * actually iterates, and `return 1` is reached only via the
		 * break statements.  Flagged, not changed: restoring iteration
		 * would alter hardware-facing behavior. */
		/* i. Set "calib_start" to 0x0 */
		reg_mode_ctrl &= ~MASK_CALIB_START;
		hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
		PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

		loop--;
	}

	return 1;
}

/*
 * Step [4]: full TX IQ calibration.  Programs the RF into TX-cal loopback,
 * adjusts TXVGA, then runs _tx_iq_calibration_loop_winbond() with
 * progressively looser thresholds (150/100 ... 700/500), clearing the
 * previous correction before each retry.
 */
void _tx_iq_calibration_winbond(struct hw_data *phw_data)
{
	u32 reg_agc_ctrl3;
#ifdef _DEBUG
	s32 tx_cal_reg[4];
#endif
	u32 reg_mode_ctrl;
	u32 val;
	u8 result;

	PHY_DEBUG(("[CAL] -> [4]_tx_iq_calibration()\n"));

	/* 0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a).
enable TX IQ calibration loop circuits */
	phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
	/* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
	phy_set_rf_data(phw_data, 11, (11<<24)|0x19BDD6); /* 20060612.1.a 0x1905D6); */
	/* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
	phy_set_rf_data(phw_data, 5, (5<<24)|0x24C60A); /* 0x24C60A (high temperature) */
	/* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
	phy_set_rf_data(phw_data, 6, (6<<24)|0x34880C); /* 20060612.1.a 0x06890C); */
	/* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
	phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);

	/* ; [BB-chip]: Calibration (6f).Send test pattern */
	/* ; [BB-chip]: Calibration (6g). Search RXGCL optimal value */
	/* ; [BB-chip]: Calibration (6h). Calculate TX-path IQ imbalance and setting TX path IQ compensation table */
	/* phy_set_rf_data(phw_data, 3, (3<<24)|0x025586); */

	msleep(30); /* 20060612.1.a 30ms delay. Add the follow 2 lines */
	/* To adjust TXVGA to fit iq_mag_0 range from 1250 ~ 1750 */
	adjust_TXVGA_for_iq_mag(phw_data);

	/* a.
Disable AGC */
	hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
	reg_agc_ctrl3 &= ~BIT(2);
	reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);

	hw_get_dxx_reg(phw_data, REG_AGC_CTRL5, &val);
	val |= MASK_AGC_FIX_GAIN;
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);

	/* first attempt with the tightest thresholds */
	result = _tx_iq_calibration_loop_winbond(phw_data, 150, 100);

	if (result > 0) {
		/* failed: wipe the partial correction and retry, looser */
		if (phw_data->revision == 0x2002) /* 1st-cut */{
			hw_get_dxx_reg(phw_data, 0x54, &val);
			val &= 0x0000FFFF;
			hw_set_dxx_reg(phw_data, 0x54, val);
		} else /* 2nd-cut*/{
			hw_get_dxx_reg(phw_data, 0x3C, &val);
			val &= 0x000003FF;
			hw_set_dxx_reg(phw_data, 0x3C, val);
		}

		result = _tx_iq_calibration_loop_winbond(phw_data, 300, 200);

		if (result > 0) {
			if (phw_data->revision == 0x2002) /* 1st-cut */{
				hw_get_dxx_reg(phw_data, 0x54, &val);
				val &= 0x0000FFFF;
				hw_set_dxx_reg(phw_data, 0x54, val);
			} else /* 2nd-cut*/{
				hw_get_dxx_reg(phw_data, 0x3C, &val);
				val &= 0x000003FF;
				hw_set_dxx_reg(phw_data, 0x3C, val);
			}

			result = _tx_iq_calibration_loop_winbond(phw_data, 500, 400);

			if (result > 0) {
				if (phw_data->revision == 0x2002) /* 1st-cut */{
					hw_get_dxx_reg(phw_data, 0x54, &val);
					val &= 0x0000FFFF;
					hw_set_dxx_reg(phw_data, 0x54, val);
				} else /* 2nd-cut */{
					hw_get_dxx_reg(phw_data, 0x3C, &val);
					val &= 0x000003FF;
					hw_set_dxx_reg(phw_data, 0x3C, val);
				}

				result = _tx_iq_calibration_loop_winbond(phw_data, 700, 500);

				if (result > 0) {
					/* all four attempts failed — leave the correction cleared */
					PHY_DEBUG(("[CAL] ** <_tx_iq_calibration> **************\n"));
					PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION FAILURE !!\n"));
					PHY_DEBUG(("[CAL] **************************************\n"));

					if (phw_data->revision == 0x2002) /* 1st-cut */{
						hw_get_dxx_reg(phw_data, 0x54, &val);
						val &= 0x0000FFFF;
						hw_set_dxx_reg(phw_data, 0x54, val);
					} else /* 2nd-cut */{
						hw_get_dxx_reg(phw_data, 0x3C, &val);
						val &= 0x000003FF;
						hw_set_dxx_reg(phw_data, 0x3C, val);
					}
				}
			}
		}
	}

	/* i.
Set "calib_start" to 0x0 */
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
	reg_mode_ctrl &= ~MASK_CALIB_START;
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

	/* g. Enable AGC */
	/* hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &val); */
	reg_agc_ctrl3 |= BIT(2);
	reg_agc_ctrl3 &= ~(MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
	hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);

#ifdef _DEBUG
	/* dump the correction finally left in hardware */
	if (phw_data->revision == 0x2002) /* 1st-cut */{
		hw_get_dxx_reg(phw_data, 0x54, &val);
		PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));
		tx_cal_reg[0] = _s4_to_s32((val & 0xF0000000) >> 28);
		tx_cal_reg[1] = _s4_to_s32((val & 0x0F000000) >> 24);
		tx_cal_reg[2] = _s4_to_s32((val & 0x00F00000) >> 20);
		tx_cal_reg[3] = _s4_to_s32((val & 0x000F0000) >> 16);
	} else /* 2nd-cut */ {
		hw_get_dxx_reg(phw_data, 0x3C, &val);
		PHY_DEBUG(("[CAL] ** 0x3C = 0x%08X\n", val));
		tx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
		tx_cal_reg[1] = _s6_to_s32((val & 0x07E00000) >> 21);
		tx_cal_reg[2] = _s6_to_s32((val & 0x001F8000) >> 15);
		tx_cal_reg[3] = _s5_to_s32((val & 0x00007C00) >> 10);
	}

	PHY_DEBUG(("[CAL] ** tx_cal_reg[0] = %d\n", tx_cal_reg[0]));
	PHY_DEBUG(("[CAL] tx_cal_reg[1] = %d\n", tx_cal_reg[1]));
	PHY_DEBUG(("[CAL] tx_cal_reg[2] = %d\n", tx_cal_reg[2]));
	PHY_DEBUG(("[CAL] tx_cal_reg[3] = %d\n", tx_cal_reg[3]));
#endif

	/*
	 * for test - BEN
	 * RF Control Override
	 */
}

/*****************************************************/
/*
 * Step [5] inner loop: RX IQ-imbalance calibration pass.  Measures tone
 * and image vectors, derives residual gain/phase (after subtracting the
 * TX residuals saved by the TX loop), and folds a correction into
 * register 0x54.  @factor scales the image power in the verify test.
 * Returns 0 on success, 1 on failure.
 */
u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 frequency)
{
	u32 reg_mode_ctrl;
	s32 iqcal_tone_i;
	s32 iqcal_tone_q;
	s32 iqcal_image_i;
	s32 iqcal_image_q;
	s32 rot_tone_i_b;
	s32 rot_tone_q_b;
	s32 rot_image_i_b;
	s32 rot_image_q_b;
	s32 rx_cal_flt_b[4];
	s32 rx_cal[4];
	s32 rx_cal_reg[4];
	s32 a_2, b_2;	/* residual EPSILON/2 and THETA/2 */
	s32 sin_b, sin_2b;
	s32 cos_b, cos_2b;
	s32 temp1, temp2;
	u32 val;
	u16 loop;
	u32 pwr_tone;
	u32 pwr_image;
	u8 verify_count;
	s32 iqcal_tone_i_avg, iqcal_tone_q_avg;
	s32 iqcal_image_i_avg, iqcal_image_q_avg;
	u16 capture_time;

	PHY_DEBUG(("[CAL] -> [5]_rx_iq_calibration_loop()\n"));
	PHY_DEBUG(("[CAL] ** factor = %d\n", factor));

	hw_set_dxx_reg(phw_data, 0x58, 0x44444444); /* IQ_Alpha */

	/* b. */
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));

	verify_count = 0;

	/* for (loop = 0; loop < 1; loop++) */
	/* for (loop = 0; loop < LOOP_TIMES; loop++) */
	loop = LOOP_TIMES;
	while (loop > 0) {
		PHY_DEBUG(("[CAL] [%d.] <_rx_iq_calibration_loop>\n", (LOOP_TIMES-loop+1)));
		iqcal_tone_i_avg = 0;
		iqcal_tone_q_avg = 0;
		iqcal_image_i_avg = 0;
		iqcal_image_q_avg = 0;
		capture_time = 0;

		/* average ten captures; the first sample is discarded below */
		for (capture_time = 0; capture_time < 10; capture_time++) {
			/* i. Set "calib_start" to 0x0 */
			reg_mode_ctrl &= ~MASK_CALIB_START;
			if (!hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl))/*20060718.1 modify */
				return 0;
			PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

			/* restart calibration in mode 0x1 (RX IQ cal) */
			reg_mode_ctrl &= ~MASK_IQCAL_MODE;
			reg_mode_ctrl |= (MASK_CALIB_START|0x1);
			hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
			PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

			/* c.
*/
		if (rot_tone_i_b == 0) {
			PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> ERROR *******\n"));
			PHY_DEBUG(("[CAL] ** rot_tone_i_b=0 to calculate EPS and THETA !!\n"));
			PHY_DEBUG(("[CAL] ******************************************\n"));
			break;
		}

		/* residuals, minus the TX-path residuals saved by the TX loop */
		a_2 = (rot_image_i_b * 32768) / rot_tone_i_b -
			phw_data->iq_rsdl_gain_tx_d2;
		b_2 = (rot_image_q_b * 32768) / rot_tone_i_b -
			phw_data->iq_rsdl_phase_tx_d2;

		PHY_DEBUG(("[CAL] ** iq_rsdl_gain_tx_d2 = %d\n", phw_data->iq_rsdl_gain_tx_d2));
		PHY_DEBUG(("[CAL] ** iq_rsdl_phase_tx_d2= %d\n", phw_data->iq_rsdl_phase_tx_d2));
		PHY_DEBUG(("[CAL] ***** EPSILON/2 = %d\n", a_2));
		PHY_DEBUG(("[CAL] ***** THETA/2 = %d\n", b_2));

		_sin_cos(b_2, &sin_b, &cos_b);
		_sin_cos(b_2*2, &sin_2b, &cos_2b);
		PHY_DEBUG(("[CAL] ** sin(b/2)=%d, cos(b/2)=%d\n", sin_b, cos_b));
		PHY_DEBUG(("[CAL] ** sin(b)=%d, cos(b)=%d\n", sin_2b, cos_2b));

		if (cos_2b == 0) {
			PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> ERROR *******\n"));
			PHY_DEBUG(("[CAL] ** cos(b)=0 !!\n"));
			PHY_DEBUG(("[CAL] ******************************************\n"));
			break;
		}

		/* 1280 * 32768 = 41943040 */
		temp1 = (41943040/cos_2b)*cos_b;

		/* temp2 = (41943040/cos_2b)*sin_b*(-1); */
		if (phw_data->revision == 0x2002)/* 1st-cut */
			temp2 = (41943040/cos_2b)*sin_b*(-1);
		else/* 2nd-cut */
			temp2 = (41943040*4/cos_2b)*sin_b*(-1);

		/* compensation matrix; note the [1]/[2] sign pairing differs
		 * from the TX loop's (RX uses 32768-a_2 for entry [1]) */
		rx_cal_flt_b[0] = _floor(temp1/(32768+a_2));
		rx_cal_flt_b[1] = _floor(temp2/(32768-a_2));
		rx_cal_flt_b[2] = _floor(temp2/(32768+a_2));
		rx_cal_flt_b[3] = _floor(temp1/(32768-a_2));
		PHY_DEBUG(("[CAL] ** rx_cal_flt_b[0] = %d\n", rx_cal_flt_b[0]));
		PHY_DEBUG(("[CAL] rx_cal_flt_b[1] = %d\n", rx_cal_flt_b[1]));
		PHY_DEBUG(("[CAL] rx_cal_flt_b[2] = %d\n", rx_cal_flt_b[2]));
		PHY_DEBUG(("[CAL] rx_cal_flt_b[3] = %d\n", rx_cal_flt_b[3]));

		rx_cal[0] = rx_cal_flt_b[0] - 128;
		rx_cal[1] = rx_cal_flt_b[1];
		rx_cal[2] = rx_cal_flt_b[2];
		rx_cal[3] = rx_cal_flt_b[3] - 128;
		PHY_DEBUG(("[CAL] ** rx_cal[0] = %d\n", rx_cal[0]));
		PHY_DEBUG(("[CAL] rx_cal[1] = %d\n", rx_cal[1]));
		PHY_DEBUG(("[CAL] rx_cal[2] = %d\n", rx_cal[2]));
		PHY_DEBUG(("[CAL] rx_cal[3] = %d\n", rx_cal[3]));

		/* e. */
		/* verify: tone power must exceed the scaled image power */
		pwr_tone = (iqcal_tone_i*iqcal_tone_i + iqcal_tone_q*iqcal_tone_q);
		pwr_image = (iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q)*factor;

		PHY_DEBUG(("[CAL] ** pwr_tone = %d\n", pwr_tone));
		PHY_DEBUG(("[CAL] ** pwr_image = %d\n", pwr_image));

		if (pwr_tone > pwr_image) {
			verify_count++;
			PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *************\n"));
			PHY_DEBUG(("[CAL] ** VERIFY OK # %d !!\n", verify_count));
			PHY_DEBUG(("[CAL] ******************************************\n"));
			/* three consecutive verifications = calibrated */
			if (verify_count > 2) {
				PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *********\n"));
				PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION OK !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				return 0;
			}
			continue;
		}

		/* g. */
		hw_get_dxx_reg(phw_data, 0x54, &val);
		PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));

		if (phw_data->revision == 0x2002) /* 1st-cut */{
			rx_cal_reg[0] = _s4_to_s32((val & 0x0000F000) >> 12);
			rx_cal_reg[1] = _s4_to_s32((val & 0x00000F00) >> 8);
			rx_cal_reg[2] = _s4_to_s32((val & 0x000000F0) >> 4);
			rx_cal_reg[3] = _s4_to_s32((val & 0x0000000F));
		} else /* 2nd-cut */{
			rx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
			rx_cal_reg[1] = _s6_to_s32((val & 0x07E00000) >> 21);
			rx_cal_reg[2] = _s6_to_s32((val & 0x001F8000) >> 15);
			rx_cal_reg[3] = _s5_to_s32((val & 0x00007C00) >> 10);
		}

		PHY_DEBUG(("[CAL] ** rx_cal_reg[0] = %d\n", rx_cal_reg[0]));
		PHY_DEBUG(("[CAL] rx_cal_reg[1] = %d\n", rx_cal_reg[1]));
		PHY_DEBUG(("[CAL] rx_cal_reg[2] = %d\n", rx_cal_reg[2]));
		PHY_DEBUG(("[CAL] rx_cal_reg[3] = %d\n", rx_cal_reg[3]));

		/* stop if the hardware correction fields are saturated */
		if (phw_data->revision == 0x2002) /* 1st-cut */{
			if (((rx_cal_reg[0] == 7) || (rx_cal_reg[0] == (-8))) &&
				((rx_cal_reg[3] == 7) || (rx_cal_reg[3] == (-8)))) {
				PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *********\n"));
				PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION SATUATION !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				break;
			}
		} else /* 2nd-cut */{
			if (((rx_cal_reg[0] == 31) || (rx_cal_reg[0] == (-32))) &&
				((rx_cal_reg[3] == 31) || (rx_cal_reg[3] == (-32)))) {
				PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *********\n"));
				PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION SATUATION !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				break;
			}
		}

		/* accumulate onto the correction already applied */
		rx_cal[0] = rx_cal[0] + rx_cal_reg[0];
		rx_cal[1] = rx_cal[1] + rx_cal_reg[1];
		rx_cal[2] = rx_cal[2] + rx_cal_reg[2];
		rx_cal[3] = rx_cal[3] + rx_cal_reg[3];
		PHY_DEBUG(("[CAL] ** apply rx_cal[0] = %d\n", rx_cal[0]));
		PHY_DEBUG(("[CAL] apply rx_cal[1] = %d\n", rx_cal[1]));
		PHY_DEBUG(("[CAL] apply rx_cal[2] = %d\n", rx_cal[2]));
		PHY_DEBUG(("[CAL] apply rx_cal[3] = %d\n", rx_cal[3]));

		hw_get_dxx_reg(phw_data, 0x54, &val);
		if (phw_data->revision == 0x2002) /* 1st-cut */{
			val &= 0x0000FFFF;
			val |= ((_s32_to_s4(rx_cal[0]) << 12)|
				(_s32_to_s4(rx_cal[1]) << 8)|
				(_s32_to_s4(rx_cal[2]) << 4)|
				(_s32_to_s4(rx_cal[3])));
			hw_set_dxx_reg(phw_data, 0x54, val);
		} else /* 2nd-cut */{
			val &= 0x000003FF;
			val |= ((_s32_to_s5(rx_cal[0]) << 27)|
				(_s32_to_s6(rx_cal[1]) << 21)|
				(_s32_to_s6(rx_cal[2]) << 15)|
				(_s32_to_s5(rx_cal[3]) << 10));
			hw_set_dxx_reg(phw_data, 0x54, val);

			/* NOTE(review): 2nd-cut gives up with success after
			 * LOOP_TIMES-3 iterations (loop counts down) — the
			 * intent of the magic 3 is not evident from here. */
			if (loop == 3)
				return 0;
		}
		PHY_DEBUG(("[CAL] ** CALIB_DATA = 0x%08X\n", val));

		loop--;
	}

	return 1;
}

/*************************************************/
/***************************************************************/
/*
 * Step [5] driver: RX IQ calibration.  Puts the RF into RX-cal loopback
 * and runs the inner loop with decreasing image-power factors (12589,
 * 7943, 5011), resetting the RX correction before each retry.
 */
void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
{
/* figo 20050523 marked this flag for can't compile for release */
#ifdef _DEBUG
	s32 rx_cal_reg[4];
	u32 val;
#endif
	u8 result;

	PHY_DEBUG(("[CAL] -> [5]_rx_iq_calibration()\n"));
	/* a. Set RFIC to "RX calibration mode" */
	/* ; ----- Calibration (7). RX path IQ imbalance calibration loop */
	/* 0x01 0xFFBFC2 ; 3FEFF ; Calibration (7a). enable RX IQ calibration loop circuits */
	phy_set_rf_data(phw_data, 1, (1<<24)|0xEFBFC2);
	/* 0x0B 0x1A01D6 ; 06817 ; Calibration (7b).
enable RX I/Q cal loop SW1 circuits */
	phy_set_rf_data(phw_data, 11, (11<<24)|0x1A05D6);
	/* 0x05 0x24848A ; 09212 ; Calibration (7c). setting TX-VGA gain (TXGCH) to 2 --> to be optimized */
	phy_set_rf_data(phw_data, 5, (5<<24) | phw_data->txvga_setting_for_cal);
	/* 0x06 0x06840C ; 01A10 ; Calibration (7d). RXGCH=00; RXGCL=010 000 (RXVGA) --> to be optimized */
	phy_set_rf_data(phw_data, 6, (6<<24)|0x06834C);
	/* 0x00 0xFFF1C0 ; 3F7C7 ; Calibration (7e). turn on IQ imbalance/Test mode */
	phy_set_rf_data(phw_data, 0, (0<<24)|0xFFF1C0);

	/* ; [BB-chip]: Calibration (7f). Send test pattern */
	/* ; [BB-chip]: Calibration (7g). Search RXGCL optimal value */
	/* ; [BB-chip]: Calibration (7h). Calculate RX-path IQ imbalance and setting RX path IQ compensation table */

	result = _rx_iq_calibration_loop_winbond(phw_data, 12589, frequency);

	if (result > 0) {
		_reset_rx_cal(phw_data);
		result = _rx_iq_calibration_loop_winbond(phw_data, 7943, frequency);

		if (result > 0) {
			_reset_rx_cal(phw_data);
			result = _rx_iq_calibration_loop_winbond(phw_data, 5011, frequency);

			if (result > 0) {
				PHY_DEBUG(("[CAL] ** <_rx_iq_calibration> **************\n"));
				PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION FAILURE !!\n"));
				PHY_DEBUG(("[CAL] **************************************\n"));
				_reset_rx_cal(phw_data);
			}
		}
	}

#ifdef _DEBUG
	hw_get_dxx_reg(phw_data, 0x54, &val);
	PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));

	if (phw_data->revision == 0x2002) /* 1st-cut */{
		rx_cal_reg[0] = _s4_to_s32((val & 0x0000F000) >> 12);
		rx_cal_reg[1] = _s4_to_s32((val & 0x00000F00) >> 8);
		rx_cal_reg[2] = _s4_to_s32((val & 0x000000F0) >> 4);
		rx_cal_reg[3] = _s4_to_s32((val & 0x0000000F));
	} else /* 2nd-cut */{
		rx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
		rx_cal_reg[1] = _s6_to_s32((val & 0x07E00000) >> 21);
		rx_cal_reg[2] = _s6_to_s32((val & 0x001F8000) >> 15);
		rx_cal_reg[3] = _s5_to_s32((val & 0x00007C00) >> 10);
	}

	PHY_DEBUG(("[CAL] ** rx_cal_reg[0] = %d\n", rx_cal_reg[0]));
	PHY_DEBUG(("[CAL] rx_cal_reg[1] = %d\n", rx_cal_reg[1]));
	PHY_DEBUG(("[CAL] rx_cal_reg[2] = %d\n", rx_cal_reg[2]));
	PHY_DEBUG(("[CAL] rx_cal_reg[3] = %d\n", rx_cal_reg[3]));
#endif
}

/*******************************************************/
/*
 * Top-level calibration entry point: runs the RX ADC DC offset, TX IQ,
 * and RX IQ calibration stages, then clears the calibration mode/start
 * bits.  (Definition continues beyond this chunk.)
 */
void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency)
{
	u32 reg_mode_ctrl;
	u32 iq_alpha;

	PHY_DEBUG(("[CAL] -> phy_calibration_winbond()\n"));

	hw_get_dxx_reg(phw_data, 0x58, &iq_alpha);

	_rxadc_dc_offset_cancellation_winbond(phw_data, frequency);
	/* _txidac_dc_offset_cancellation_winbond(phw_data); */
	/* _txqdac_dc_offset_cancellation_winbond(phw_data); */

	_tx_iq_calibration_winbond(phw_data);
	_rx_iq_calibration_winbond(phw_data, frequency);

	/*********************************************************************/
	hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
	reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE|MASK_CALIB_START); /* set when finish */
	hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
	PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));

	/* i.
Set RFIC to "Normal mode" */ hw_set_dxx_reg(phw_data, 0x58, iq_alpha); /*********************************************************************/ phy_init_rf(phw_data); } /******************/ void phy_set_rf_data(struct hw_data *pHwData, u32 index, u32 value) { u32 ltmp = 0; switch (pHwData->phy_type) { case RF_MAXIM_2825: case RF_MAXIM_V1: /* 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331) */ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18); break; case RF_MAXIM_2827: ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18); break; case RF_MAXIM_2828: ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18); break; case RF_MAXIM_2829: ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18); break; case RF_AIROHA_2230: case RF_AIROHA_2230S: /* 20060420 Add this */ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(value, 20); break; case RF_AIROHA_7230: ltmp = (1 << 31) | (0 << 30) | (24 << 24) | (value&0xffffff); break; case RF_WB_242: case RF_WB_242_1:/* 20060619.5 Add */ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(value, 24); break; } Wb35Reg_WriteSync(pHwData, 0x0864, ltmp); } /* 20060717 modify as Bruce's mail */ unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *phw_data) { int init_txvga = 0; u32 reg_mode_ctrl; u32 val; s32 iqcal_tone_i0; s32 iqcal_tone_q0; u32 sqsum; s32 iq_mag_0_tx; u8 reg_state; int current_txvga; reg_state = 0; for (init_txvga = 0; init_txvga < 10; init_txvga++) { current_txvga = (0x24C40A|(init_txvga<<6)); phy_set_rf_data(phw_data, 5, ((5<<24)|current_txvga)); phw_data->txvga_setting_for_cal = current_txvga; msleep(30);/* 20060612.1.a */ if (!hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl))/* 20060718.1 modify */ return false; PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl)); /* * a. 
Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to * enable "IQ alibration Mode II" */ reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE); reg_mode_ctrl &= ~MASK_IQCAL_MODE; reg_mode_ctrl |= (MASK_CALIB_START|0x02); reg_mode_ctrl |= (MASK_CALIB_START|0x02|2<<2); hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl); PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl)); udelay(1);/* 20060612.1.a */ udelay(300);/* 20060612.1.a */ /* b. */ hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val); PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val)); udelay(300);/* 20060612.1.a */ iqcal_tone_i0 = _s13_to_s32(val & 0x00001FFF); iqcal_tone_q0 = _s13_to_s32((val & 0x03FFE000) >> 13); PHY_DEBUG(("[CAL] ** iqcal_tone_i0=%d, iqcal_tone_q0=%d\n", iqcal_tone_i0, iqcal_tone_q0)); sqsum = iqcal_tone_i0*iqcal_tone_i0 + iqcal_tone_q0*iqcal_tone_q0; iq_mag_0_tx = (s32) _sqrt(sqsum); PHY_DEBUG(("[CAL] ** auto_adjust_txvga_for_iq_mag_0_tx=%d\n", iq_mag_0_tx)); if (iq_mag_0_tx >= 700 && iq_mag_0_tx <= 1750) break; else if (iq_mag_0_tx > 1750) { init_txvga = -2; continue; } else continue; } if (iq_mag_0_tx >= 700 && iq_mag_0_tx <= 1750) return true; else return false; }
gpl-2.0
sktjdgns1189/android_kernel_samsung_frescolteskt
arch/powerpc/sysdev/fsl_rio.c
4597
17990
/* * Freescale MPC85xx/MPC86xx RapidIO support * * Copyright 2009 Sysgo AG * Thomas Moll <thomas.moll@sysgo.com> * - fixed maintenance access routines, check for aligned access * * Copyright 2009 Integrated Device Technology, Inc. * Alex Bounine <alexandre.bounine@idt.com> * - Added Port-Write message handling * - Added Machine Check exception handling * * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. * Zhang Wei <wei.zhang@freescale.com> * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/machdep.h> #include "fsl_rio.h" #undef DEBUG_PW /* Port-Write debugging */ #define RIO_PORT1_EDCSR 0x0640 #define RIO_PORT2_EDCSR 0x0680 #define RIO_PORT1_IECSR 0x10130 #define RIO_PORT2_IECSR 0x101B0 #define RIO_GCCSR 0x13c #define RIO_ESCSR 0x158 #define ESCSR_CLEAR 0x07120204 #define RIO_PORT2_ESCSR 0x178 #define RIO_CCSR 0x15c #define RIO_LTLEDCSR_IER 0x80000000 #define RIO_LTLEDCSR_PRT 0x01000000 #define IECSR_CLEAR 0x80000000 #define RIO_ISR_AACR 0x10120 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ #define __fsl_read_rio_config(x, addr, err, op) \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ " eieio\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %1,-1\n" \ " li %0,%3\n" \ " b 2b\n" \ ".section __ex_table,\"a\"\n" \ PPC_LONG_ALIGN "\n" \ PPC_LONG "1b,3b\n" \ ".text" \ : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) void __iomem *rio_regs_win; void 
__iomem *rmu_regs_win; resource_size_t rio_law_start; struct fsl_rio_dbell *dbell; struct fsl_rio_pw *pw; #ifdef CONFIG_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) { const struct exception_table_entry *entry; unsigned long reason; if (!rio_regs_win) return 0; reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { /* Check if we are prepared to handle this fault */ entry = search_exception_tables(regs->nip); if (entry) { pr_debug("RIO: %s - MC Exception handled\n", __func__); out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); regs->msr |= MSR_RI; regs->nip = entry->fixup; return 1; } } return 0; } EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); #endif /** * fsl_local_config_read - Generate a MPC85xx local config space read * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be read into * * Generates a MPC85xx local configuration space read. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_local_config_read(struct rio_mport *mport, int index, u32 offset, int len, u32 *data) { struct rio_priv *priv = mport->priv; pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index, offset); *data = in_be32(priv->regs_win + offset); return 0; } /** * fsl_local_config_write - Generate a MPC85xx local config space write * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be written * * Generates a MPC85xx local configuration space write. Returns %0 on * success or %-EINVAL on failure. 
*/ static int fsl_local_config_write(struct rio_mport *mport, int index, u32 offset, int len, u32 data) { struct rio_priv *priv = mport->priv; pr_debug ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", index, offset, data); out_be32(priv->regs_win + offset, data); return 0; } /** * fsl_rio_config_read - Generate a MPC85xx read maintenance transaction * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Location to be read into * * Generates a MPC85xx read maintenance transaction. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *val) { struct rio_priv *priv = mport->priv; u8 *data; u32 rval, err = 0; pr_debug ("fsl_rio_config_read:" " index %d destid %d hopcount %d offset %8.8x len %d\n", index, destid, hopcount, offset, len); /* 16MB maintenance window possible */ /* allow only aligned access to maintenance registers */ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: __fsl_read_rio_config(rval, data, err, "lbz"); break; case 2: __fsl_read_rio_config(rval, data, err, "lhz"); break; case 4: __fsl_read_rio_config(rval, data, err, "lwz"); break; default: return -EINVAL; } if (err) { pr_debug("RIO: cfg_read error %d for %x:%x:%x\n", err, destid, hopcount, offset); } *val = rval; return err; } /** * fsl_rio_config_write - Generate a MPC85xx write maintenance transaction * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @destid: Destination ID of 
transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Value to be written * * Generates an MPC85xx write maintenance transaction. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 val) { struct rio_priv *priv = mport->priv; u8 *data; pr_debug ("fsl_rio_config_write:" " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", index, destid, hopcount, offset, len, val); /* 16MB maintenance windows possible */ /* allow only aligned access to maintenance registers */ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: out_8((u8 *) data, val); break; case 2: out_be16((u16 *) data, val); break; case 4: out_be32((u32 *) data, val); break; default: return -EINVAL; } return 0; } void fsl_rio_port_error_handler(int offset) { /*XXX: Error recovery is not implemented, we just clear errors */ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); if (offset == 0) { out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); } else { out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); } } static inline void fsl_rio_info(struct device *dev, u32 ccsr) { const char *str; if (ccsr & 1) { /* Serial phy */ switch (ccsr >> 30) { case 0: str = "1"; break; case 1: str = "4"; break; default: str = "Unknown"; break; } dev_info(dev, "Hardware port width: 
%s\n", str); switch ((ccsr >> 27) & 7) { case 0: str = "Single-lane 0"; break; case 1: str = "Single-lane 2"; break; case 2: str = "Four-lane"; break; default: str = "Unknown"; break; } dev_info(dev, "Training connection status: %s\n", str); } else { /* Parallel phy */ if (!(ccsr & 0x80000000)) dev_info(dev, "Output port operating in 8-bit mode\n"); if (!(ccsr & 0x08000000)) dev_info(dev, "Input port operating in 8-bit mode\n"); } } /** * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface * @dev: platform_device pointer * * Initializes MPC85xx RapidIO hardware interface, configures * master port with system-specific info, and registers the * master port with the RapidIO subsystem. */ int fsl_rio_setup(struct platform_device *dev) { struct rio_ops *ops; struct rio_mport *port; struct rio_priv *priv; int rc = 0; const u32 *dt_range, *cell, *port_index; u32 active_ports = 0; struct resource regs, rmu_regs; struct device_node *np, *rmu_node; int rlen; u32 ccsr; u64 range_start, range_size; int paw, aw, sw; u32 i; static int tmp; struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL}; if (!dev->dev.of_node) { dev_err(&dev->dev, "Device OF-Node is NULL"); return -ENODEV; } rc = of_address_to_resource(dev->dev.of_node, 0, &regs); if (rc) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", dev->dev.of_node->full_name); return -EFAULT; } dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); dev_info(&dev->dev, "Regs: %pR\n", &regs); rio_regs_win = ioremap(regs.start, resource_size(&regs)); if (!rio_regs_win) { dev_err(&dev->dev, "Unable to map rio register window\n"); rc = -ENOMEM; goto err_rio_regs; } ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); if (!ops) { rc = -ENOMEM; goto err_ops; } ops->lcread = fsl_local_config_read; ops->lcwrite = fsl_local_config_write; ops->cread = fsl_rio_config_read; ops->cwrite = fsl_rio_config_write; ops->dsend = fsl_rio_doorbell_send; ops->pwenable = fsl_rio_pw_enable; ops->open_outb_mbox = 
fsl_open_outb_mbox; ops->open_inb_mbox = fsl_open_inb_mbox; ops->close_outb_mbox = fsl_close_outb_mbox; ops->close_inb_mbox = fsl_close_inb_mbox; ops->add_outb_message = fsl_add_outb_message; ops->add_inb_buffer = fsl_add_inb_buffer; ops->get_inb_message = fsl_get_inb_message; rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0); if (!rmu_node) goto err_rmu; rc = of_address_to_resource(rmu_node, 0, &rmu_regs); if (rc) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", rmu_node->full_name); goto err_rmu; } rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); if (!rmu_regs_win) { dev_err(&dev->dev, "Unable to map rmu register window\n"); rc = -ENOMEM; goto err_rmu; } for_each_compatible_node(np, NULL, "fsl,srio-msg-unit") { rmu_np[tmp] = np; tmp++; } /*set up doobell node*/ np = of_find_compatible_node(NULL, NULL, "fsl,srio-dbell-unit"); if (!np) { rc = -ENODEV; goto err_dbell; } dbell = kzalloc(sizeof(struct fsl_rio_dbell), GFP_KERNEL); if (!(dbell)) { dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_dbell'\n"); rc = -ENOMEM; goto err_dbell; } dbell->dev = &dev->dev; dbell->bellirq = irq_of_parse_and_map(np, 1); dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq); aw = of_n_addr_cells(np); dt_range = of_get_property(np, "reg", &rlen); if (!dt_range) { pr_err("%s: unable to find 'reg' property\n", np->full_name); rc = -ENOMEM; goto err_pw; } range_start = of_read_number(dt_range, aw); dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win + (u32)range_start); /*set up port write node*/ np = of_find_compatible_node(NULL, NULL, "fsl,srio-port-write-unit"); if (!np) { rc = -ENODEV; goto err_pw; } pw = kzalloc(sizeof(struct fsl_rio_pw), GFP_KERNEL); if (!(pw)) { dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_pw'\n"); rc = -ENOMEM; goto err_pw; } pw->dev = &dev->dev; pw->pwirq = irq_of_parse_and_map(np, 0); dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq); aw = of_n_addr_cells(np); dt_range = of_get_property(np, "reg", &rlen); 
if (!dt_range) { pr_err("%s: unable to find 'reg' property\n", np->full_name); rc = -ENOMEM; goto err; } range_start = of_read_number(dt_range, aw); pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start); /*set up ports node*/ for_each_child_of_node(dev->dev.of_node, np) { port_index = of_get_property(np, "cell-index", NULL); if (!port_index) { dev_err(&dev->dev, "Can't get %s property 'cell-index'\n", np->full_name); continue; } dt_range = of_get_property(np, "ranges", &rlen); if (!dt_range) { dev_err(&dev->dev, "Can't get %s property 'ranges'\n", np->full_name); continue; } /* Get node address wide */ cell = of_get_property(np, "#address-cells", NULL); if (cell) aw = *cell; else aw = of_n_addr_cells(np); /* Get node size wide */ cell = of_get_property(np, "#size-cells", NULL); if (cell) sw = *cell; else sw = of_n_size_cells(np); /* Get parent address wide wide */ paw = of_n_addr_cells(np); range_start = of_read_number(dt_range + aw, paw); range_size = of_read_number(dt_range + aw + paw, sw); dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n", np->full_name, range_start, range_size); port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!port) continue; i = *port_index - 1; port->index = (unsigned char)i; priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); if (!priv) { dev_err(&dev->dev, "Can't alloc memory for 'priv'\n"); kfree(port); continue; } INIT_LIST_HEAD(&port->dbells); port->iores.start = range_start; port->iores.end = port->iores.start + range_size - 1; port->iores.flags = IORESOURCE_MEM; port->iores.name = "rio_io_win"; if (request_resource(&iomem_resource, &port->iores) < 0) { dev_err(&dev->dev, "RIO: Error requesting master port region" " 0x%016llx-0x%016llx\n", (u64)port->iores.start, (u64)port->iores.end); kfree(priv); kfree(port); continue; } sprintf(port->name, "RIO mport %d", i); priv->dev = &dev->dev; port->ops = ops; port->priv = priv; port->phys_efptr = 0x100; priv->regs_win = rio_regs_win; /* Probe the master 
port phy type */ ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20); port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; if (port->phy_type == RIO_PHY_PARALLEL) { dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n"); release_resource(&port->iores); kfree(priv); kfree(port); continue; } dev_info(&dev->dev, "RapidIO PHY type: Serial\n"); /* Checking the port training status */ if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) { dev_err(&dev->dev, "Port %d is not ready. " "Try to restart connection...\n", i); /* Disable ports */ out_be32(priv->regs_win + RIO_CCSR + i*0x20, 0); /* Set 1x lane */ setbits32(priv->regs_win + RIO_CCSR + i*0x20, 0x02000000); /* Enable ports */ setbits32(priv->regs_win + RIO_CCSR + i*0x20, 0x00600000); msleep(100); if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) { dev_err(&dev->dev, "Port %d restart failed.\n", i); release_resource(&port->iores); kfree(priv); kfree(port); continue; } dev_info(&dev->dev, "Port %d restart success!\n", i); } fsl_rio_info(&dev->dev, ccsr); port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) & RIO_PEF_CTLS) >> 4; dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", port->sys_size ? 65536 : 256); if (rio_register_mport(port)) { release_resource(&port->iores); kfree(priv); kfree(port); continue; } if (port->host_deviceid >= 0) out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); else out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_MASTER); priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win + ((i == 0) ? RIO_ATMU_REGS_PORT1_OFFSET : RIO_ATMU_REGS_PORT2_OFFSET)); priv->maint_atmu_regs = priv->atmu_regs + 1; /* Set to receive any dist ID for serial RapidIO controller. 
*/ if (port->phy_type == RIO_PHY_SERIAL) out_be32((priv->regs_win + RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA); /* Configure maintenance transaction window */ out_be32(&priv->maint_atmu_regs->rowbar, port->iores.start >> 12); out_be32(&priv->maint_atmu_regs->rowar, 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); priv->maint_win = ioremap(port->iores.start, RIO_MAINT_WIN_SIZE); rio_law_start = range_start; fsl_rio_setup_rmu(port, rmu_np[i]); dbell->mport[i] = port; active_ports++; } if (!active_ports) { rc = -ENOLINK; goto err; } fsl_rio_doorbell_init(dbell); fsl_rio_port_write_init(pw); return 0; err: kfree(pw); err_pw: kfree(dbell); err_dbell: iounmap(rmu_regs_win); err_rmu: kfree(ops); err_ops: iounmap(rio_regs_win); err_rio_regs: return rc; } /* The probe function for RapidIO peer-to-peer network. */ static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) { printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", dev->dev.of_node->full_name); return fsl_rio_setup(dev); }; static const struct of_device_id fsl_of_rio_rpn_ids[] = { { .compatible = "fsl,srio", }, {}, }; static struct platform_driver fsl_of_rio_rpn_driver = { .driver = { .name = "fsl-of-rio", .owner = THIS_MODULE, .of_match_table = fsl_of_rio_rpn_ids, }, .probe = fsl_of_rio_rpn_probe, }; static __init int fsl_of_rio_rpn_init(void) { return platform_driver_register(&fsl_of_rio_rpn_driver); } subsys_initcall(fsl_of_rio_rpn_init);
gpl-2.0
HtcLegacy/android_kernel_htc_msm7x27a
arch/mips/mm/highmem.c
4597
3009
#include <linux/module.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/smp.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> static pte_t *kmap_pte; unsigned long highstart_pfn, highend_pfn; void *kmap(struct page *page) { void *addr; might_sleep(); if (!PageHighMem(page)) return page_address(page); addr = kmap_high(page); flush_tlb_one((unsigned long)addr); return addr; } EXPORT_SYMBOL(kmap); void kunmap(struct page *page) { BUG_ON(in_interrupt()); if (!PageHighMem(page)) return; kunmap_high(page); } EXPORT_SYMBOL(kunmap); /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. * * However when holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. */ void *kmap_atomic(struct page *page) { unsigned long vaddr; int idx, type; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ pagefault_disable(); if (!PageHighMem(page)) return page_address(page); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(*(kmap_pte - idx))); #endif set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL)); local_flush_tlb_one((unsigned long)vaddr); return (void*) vaddr; } EXPORT_SYMBOL(kmap_atomic); void __kunmap_atomic(void *kvaddr) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; int type; if (vaddr < FIXADDR_START) { // FIXME pagefault_enable(); return; } type = kmap_atomic_idx(); #ifdef CONFIG_DEBUG_HIGHMEM { int idx = type + KM_TYPE_NR * smp_processor_id(); BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); /* * force other mappings to Oops if they'll try to access * this pte without first remap it */ pte_clear(&init_mm, vaddr, kmap_pte-idx); local_flush_tlb_one(vaddr); } #endif kmap_atomic_idx_pop(); pagefault_enable(); 
} EXPORT_SYMBOL(__kunmap_atomic); /* * This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. */ void *kmap_atomic_pfn(unsigned long pfn) { unsigned long vaddr; int idx, type; pagefault_disable(); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); flush_tlb_one(vaddr); return (void*) vaddr; } struct page *kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; pte_t *pte; if (vaddr < FIXADDR_START) return virt_to_page(ptr); idx = virt_to_fix(vaddr); pte = kmap_pte - (idx - FIX_KMAP_BEGIN); return pte_page(*pte); } void __init kmap_init(void) { unsigned long kmap_vstart; /* cache the first kmap pte */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); }
gpl-2.0
ztemt/U9180_kernel
drivers/staging/vt6656/bssdb.c
4853
53049
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: bssdb.c * * Purpose: Handles the Basic Service Set & Node Database functions * * Functions: * BSSpSearchBSSList - Search known BSS list for Desire SSID or BSSID * BSSvClearBSSList - Clear BSS List * BSSbInsertToBSSList - Insert a BSS set into known BSS list * BSSbUpdateToBSSList - Update BSS set in known BSS list * BSSbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr * BSSvCreateOneNode - Allocate an Node for Node DB * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status * BSSvUpdateNodeTxCounter - Update Tx attemps, Tx failure counter in Node DB for auto-fall back rate control * * Revision History: * * Author: Lyndon Chen * * Date: July 17, 2002 * */ #include "ttype.h" #include "tmacro.h" #include "tether.h" #include "device.h" #include "80211hdr.h" #include "bssdb.h" #include "wmgr.h" #include "datarate.h" #include "desc.h" #include "wcmd.h" #include "wpa.h" #include "baseband.h" #include "rf.h" #include "card.h" #include "mac.h" #include "wpa2.h" #include "control.h" #include "rndis.h" #include "iowpa.h" /*--------------------- Static 
Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel =MSG_LEVEL_INFO; //static int msglevel =MSG_LEVEL_DEBUG; const WORD awHWRetry0[5][5] = { {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M}, {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M}, {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M}, {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M}, {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M} }; const WORD awHWRetry1[5][5] = { {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M}, {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M}, {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M}, {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M}, {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M} }; /*--------------------- Static Functions --------------------------*/ void s_vCheckSensitivity(void *hDeviceContext); void s_vCheckPreEDThreshold(void *hDeviceContext); void s_uCalculateLinkQual(void *hDeviceContext); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Routine Description: * Search known BSS list for Desire SSID or BSSID. 
* * Return Value: * PTR to KnownBSS or NULL * -*/ PKnownBSS BSSpSearchBSSList(void *hDeviceContext, PBYTE pbyDesireBSSID, PBYTE pbyDesireSSID, CARD_PHY_TYPE ePhyType) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PBYTE pbyBSSID = NULL; PWLAN_IE_SSID pSSID = NULL; PKnownBSS pCurrBSS = NULL; PKnownBSS pSelect = NULL; BYTE ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00}; unsigned int ii = 0; unsigned int jj = 0; if (pbyDesireBSSID != NULL) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList BSSID[%02X %02X %02X-%02X %02X %02X]\n", *pbyDesireBSSID,*(pbyDesireBSSID+1),*(pbyDesireBSSID+2), *(pbyDesireBSSID+3),*(pbyDesireBSSID+4),*(pbyDesireBSSID+5)); if ((!is_broadcast_ether_addr(pbyDesireBSSID)) && (memcmp(pbyDesireBSSID, ZeroBSSID, 6)!= 0)){ pbyBSSID = pbyDesireBSSID; } } if (pbyDesireSSID != NULL) { if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0) { pSSID = (PWLAN_IE_SSID) pbyDesireSSID; } } if ((pbyBSSID != NULL)&&(pDevice->bRoaming == FALSE)) { // match BSSID first for (ii = 0; ii <MAX_BSS_NUM; ii++) { pCurrBSS = &(pMgmt->sBSSList[ii]); pCurrBSS->bSelected = FALSE; if ((pCurrBSS->bActive) && (pCurrBSS->bSelected == FALSE)) { if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) { if (pSSID != NULL) { // compare ssid if ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID, pSSID->len)) { if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) || ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) || ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ) { pCurrBSS->bSelected = TRUE; return(pCurrBSS); } } } else { if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) || ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) || ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ) { pCurrBSS->bSelected = TRUE; return(pCurrBSS); } } } } } } else { // ignore BSSID 
for (ii = 0; ii <MAX_BSS_NUM; ii++) { pCurrBSS = &(pMgmt->sBSSList[ii]); //2007-0721-01<Mark>by MikeLiu // if ((pCurrBSS->bActive) && // (pCurrBSS->bSelected == FALSE)) { pCurrBSS->bSelected = FALSE; if (pCurrBSS->bActive) { if (pSSID != NULL) { // matched SSID if (memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID, pSSID->len) || (pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) { // SSID not match skip this BSS continue; } } if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) || ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ){ // Type not match skip this BSS DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSS type mismatch.... Config[%d] BSS[0x%04x]\n", pMgmt->eConfigMode, pCurrBSS->wCapInfo); continue; } if (ePhyType != PHY_TYPE_AUTO) { if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) || ((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) { // PhyType not match skip this BSS DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Physical type mismatch.... 
ePhyType[%d] BSS[%d]\n", ePhyType, pCurrBSS->eNetworkTypeInUse); continue; } } pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList pSelect1[%02X %02X %02X-%02X %02X %02X]\n",*pCurrBSS->abyBSSID,*(pCurrBSS->abyBSSID+1),*(pCurrBSS->abyBSSID+2),*(pCurrBSS->abyBSSID+3),*(pCurrBSS->abyBSSID+4),*(pCurrBSS->abyBSSID+5)); jj++; if (pSelect == NULL) { pSelect = pCurrBSS; } else { // compare RSSI, select signal strong one if (pCurrBSS->uRSSI < pSelect->uRSSI) { pSelect = pCurrBSS; } } } } pDevice->bSameBSSMaxNum = jj; if (pSelect != NULL) { pSelect->bSelected = TRUE; if (pDevice->bRoaming == FALSE) { // Einsn Add @20070907 memset(pbyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1) ; } return(pSelect); } } return(NULL); } /*+ * * Routine Description: * Clear BSS List * * Return Value: * None. * -*/ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); unsigned int ii; for (ii = 0; ii < MAX_BSS_NUM; ii++) { if (bKeepCurrBSSID) { if (pMgmt->sBSSList[ii].bActive && !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyCurrBSSID)) { //mike mark: there are two same BSSID in list if that AP is in hidden ssid mode,one 's SSID is null, // but other's is obvious, so if it acssociate with your STA exactly,you must keep two // of them!!!!!!!!! // bKeepCurrBSSID = FALSE; continue; } } pMgmt->sBSSList[ii].bActive = FALSE; memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS)); } BSSvClearAnyBSSJoinRecord(pDevice); } /*+ * * Routine Description: * search BSS list by BSSID & SSID if matched * * Return Value: * TRUE if found. 
* -*/ PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext, PBYTE abyBSSID, PWLAN_IE_SSID pSSID) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PKnownBSS pBSSList = NULL; unsigned int ii; for (ii = 0; ii < MAX_BSS_NUM; ii++) { pBSSList = &(pMgmt->sBSSList[ii]); if (pBSSList->bActive) { if (!compare_ether_addr(pBSSList->abyBSSID, abyBSSID)) { if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){ if (memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID, pSSID->len) == 0) return pBSSList; } } } } return NULL; }; /*+ * * Routine Description: * Insert a BSS set into known BSS list * * Return Value: * TRUE if success. * -*/ BOOL BSSbInsertToBSSList(void *hDeviceContext, PBYTE abyBSSIDAddr, QWORD qwTimestamp, WORD wBeaconInterval, WORD wCapInfo, BYTE byCurrChannel, PWLAN_IE_SSID pSSID, PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates, PERPObject psERP, PWLAN_IE_RSN pRSN, PWLAN_IE_RSN_EXT pRSNWPA, PWLAN_IE_COUNTRY pIE_Country, PWLAN_IE_QUIET pIE_Quiet, unsigned int uIELength, PBYTE pbyIEs, void *pRxPacketContext) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext; PKnownBSS pBSSList = NULL; unsigned int ii; BOOL bParsingQuiet = FALSE; pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]); for (ii = 0; ii < MAX_BSS_NUM; ii++) { pBSSList = (PKnownBSS)&(pMgmt->sBSSList[ii]); if (!pBSSList->bActive) break; } if (ii == MAX_BSS_NUM){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n"); return FALSE; } // save the BSS info pBSSList->bActive = TRUE; memcpy( pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN); HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp)); LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp)); pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval); pBSSList->wCapInfo = cpu_to_le16(wCapInfo); pBSSList->uClearCount = 0; if (pSSID->len > 
WLAN_SSID_MAXLEN) pSSID->len = WLAN_SSID_MAXLEN; memcpy( pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN); pBSSList->uChannel = byCurrChannel; if (pSuppRates->len > WLAN_RATES_MAXLEN) pSuppRates->len = WLAN_RATES_MAXLEN; memcpy( pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN); if (pExtSuppRates != NULL) { if (pExtSuppRates->len > WLAN_RATES_MAXLEN) pExtSuppRates->len = WLAN_RATES_MAXLEN; memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbInsertToBSSList: pExtSuppRates->len = %d\n", pExtSuppRates->len); } else { memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1); } pBSSList->sERP.byERP = psERP->byERP; pBSSList->sERP.bERPExist = psERP->bERPExist; // Check if BSS is 802.11a/b/g if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) { pBSSList->eNetworkTypeInUse = PHY_TYPE_11A; } else { if (pBSSList->sERP.bERPExist == TRUE) { pBSSList->eNetworkTypeInUse = PHY_TYPE_11G; } else { pBSSList->eNetworkTypeInUse = PHY_TYPE_11B; } } pBSSList->byRxRate = pRxPacket->byRxRate; pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF; pBSSList->uRSSI = pRxPacket->uRSSI; pBSSList->bySQ = pRxPacket->bySQ; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { // assoc with BSS if (pBSSList == pMgmt->pCurrBSS) { bParsingQuiet = TRUE; } } WPA_ClearRSN(pBSSList); if (pRSNWPA != NULL) { unsigned int uLen = pRSNWPA->len + 2; if (uLen <= (uIELength - (unsigned int) (ULONG_PTR) ((PBYTE) pRSNWPA - pbyIEs))) { pBSSList->wWPALen = uLen; memcpy(pBSSList->byWPAIE, pRSNWPA, uLen); WPA_ParseRSN(pBSSList, pRSNWPA); } } WPA2_ClearRSN(pBSSList); if (pRSN != NULL) { unsigned int uLen = pRSN->len + 2; if (uLen <= (uIELength - (unsigned int) (ULONG_PTR) ((PBYTE) pRSN - pbyIEs))) { pBSSList->wRSNLen = uLen; memcpy(pBSSList->byRSNIE, pRSN, uLen); WPA2vParseRSN(pBSSList, pRSN); } } if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == TRUE)) { 
PSKeyItem pTransmitKey = NULL; BOOL bIs802_1x = FALSE; for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii ++) { if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) { bIs802_1x = TRUE; break; } } if ((bIs802_1x == TRUE) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) && ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) { bAdd_PMKID_Candidate((void *) pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj); if ((pDevice->bLinkPass == TRUE) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE) || (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == TRUE)) { pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList; pDevice->gsPMKIDCandidate.Version = 1; } } } } if (pDevice->bUpdateBBVGA) { // Moniter if RSSI is too strong. pBSSList->byRSSIStatCnt = 0; RFvRSSITodBm(pDevice, (BYTE)(pRxPacket->uRSSI), &pBSSList->ldBmMAX); pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX; pBSSList->ldBmAverRange = pBSSList->ldBmMAX; for (ii = 1; ii < RSSI_STAT_COUNT; ii++) pBSSList->ldBmAverage[ii] = 0; } pBSSList->uIELength = uIELength; if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN) pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN; memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength); return TRUE; } /*+ * * Routine Description: * Update BSS set in known BSS list * * Return Value: * TRUE if success. 
* -*/ // TODO: input structure modify BOOL BSSbUpdateToBSSList(void *hDeviceContext, QWORD qwTimestamp, WORD wBeaconInterval, WORD wCapInfo, BYTE byCurrChannel, BOOL bChannelHit, PWLAN_IE_SSID pSSID, PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates, PERPObject psERP, PWLAN_IE_RSN pRSN, PWLAN_IE_RSN_EXT pRSNWPA, PWLAN_IE_COUNTRY pIE_Country, PWLAN_IE_QUIET pIE_Quiet, PKnownBSS pBSSList, unsigned int uIELength, PBYTE pbyIEs, void *pRxPacketContext) { int ii, jj; PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext; signed long ldBm, ldBmSum; BOOL bParsingQuiet = FALSE; if (pBSSList == NULL) return FALSE; HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp)); LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp)); pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval); pBSSList->wCapInfo = cpu_to_le16(wCapInfo); pBSSList->uClearCount = 0; pBSSList->uChannel = byCurrChannel; if (pSSID->len > WLAN_SSID_MAXLEN) pSSID->len = WLAN_SSID_MAXLEN; if ((pSSID->len != 0) && (pSSID->abySSID[0] != 0)) memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN); memcpy(pBSSList->abySuppRates, pSuppRates,pSuppRates->len + WLAN_IEHDR_LEN); if (pExtSuppRates != NULL) { memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,pExtSuppRates->len + WLAN_IEHDR_LEN); } else { memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1); } pBSSList->sERP.byERP = psERP->byERP; pBSSList->sERP.bERPExist = psERP->bERPExist; // Check if BSS is 802.11a/b/g if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) { pBSSList->eNetworkTypeInUse = PHY_TYPE_11A; } else { if (pBSSList->sERP.bERPExist == TRUE) { pBSSList->eNetworkTypeInUse = PHY_TYPE_11G; } else { pBSSList->eNetworkTypeInUse = PHY_TYPE_11B; } } pBSSList->byRxRate = pRxPacket->byRxRate; pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF; if(bChannelHit) pBSSList->uRSSI = pRxPacket->uRSSI; pBSSList->bySQ 
= pRxPacket->bySQ; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { // assoc with BSS if (pBSSList == pMgmt->pCurrBSS) { bParsingQuiet = TRUE; } } WPA_ClearRSN(pBSSList); //mike update if (pRSNWPA != NULL) { unsigned int uLen = pRSNWPA->len + 2; if (uLen <= (uIELength - (unsigned int) (ULONG_PTR) ((PBYTE) pRSNWPA - pbyIEs))) { pBSSList->wWPALen = uLen; memcpy(pBSSList->byWPAIE, pRSNWPA, uLen); WPA_ParseRSN(pBSSList, pRSNWPA); } } WPA2_ClearRSN(pBSSList); //mike update if (pRSN != NULL) { unsigned int uLen = pRSN->len + 2; if (uLen <= (uIELength - (unsigned int) (ULONG_PTR) ((PBYTE) pRSN - pbyIEs))) { pBSSList->wRSNLen = uLen; memcpy(pBSSList->byRSNIE, pRSN, uLen); WPA2vParseRSN(pBSSList, pRSN); } } if (pRxPacket->uRSSI != 0) { RFvRSSITodBm(pDevice, (BYTE)(pRxPacket->uRSSI), &ldBm); // Moniter if RSSI is too strong. pBSSList->byRSSIStatCnt++; pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT; pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm; ldBmSum = 0; for (ii = 0, jj = 0; ii < RSSI_STAT_COUNT; ii++) { if (pBSSList->ldBmAverage[ii] != 0) { pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm); ldBmSum += pBSSList->ldBmAverage[ii]; jj++; } } pBSSList->ldBmAverRange = ldBmSum /jj; } pBSSList->uIELength = uIELength; if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN) pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN; memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength); return TRUE; } /*+ * * Routine Description: * Search Node DB table to find the index of matched DstAddr * * Return Value: * None * -*/ BOOL BSSbIsSTAInNodeDB(void *hDeviceContext, PBYTE abyDstAddr, unsigned int *puNodeIndex) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); unsigned int ii; // Index = 0 reserved for AP Node for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive) { if (!compare_ether_addr(abyDstAddr, pMgmt->sNodeDBTable[ii].abyMACAddr)) { *puNodeIndex = ii; return TRUE; } } } return 
FALSE; }; /*+ * * Routine Description: * Find an empty node and allocated; if no empty found, * instand used of most inactive one. * * Return Value: * None * -*/ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); unsigned int ii; unsigned int BigestCount = 0; unsigned int SelectIndex; struct sk_buff *skb; // Index = 0 reserved for AP Node (In STA mode) // Index = 0 reserved for Broadcast/MultiCast (In AP mode) SelectIndex = 1; for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive) { if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) { BigestCount = pMgmt->sNodeDBTable[ii].uInActiveCount; SelectIndex = ii; } } else { break; } } // if not found replace uInActiveCount is largest one. if ( ii == (MAX_NODE_NUM + 1)) { *puNodeIndex = SelectIndex; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Replace inactive node = %d\n", SelectIndex); // clear ps buffer if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) { while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL) dev_kfree_skb(skb); } } else { *puNodeIndex = ii; } memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB)); pMgmt->sNodeDBTable[*puNodeIndex].bActive = TRUE; pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND; // for AP mode PS queue skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue); pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0; pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii); }; /*+ * * Routine Description: * Remove Node by NodeIndex * * * Return Value: * None * -*/ void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; struct sk_buff *skb; while ((skb = 
skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL) dev_kfree_skb(skb); // clear context memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB)); // clear tx bit map pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7]; }; /*+ * * Routine Description: * Update AP Node content in Index 0 of KnownNodeDB * * * Return Value: * None * -*/ void BSSvUpdateAPNode(void *hDeviceContext, PWORD pwCapInfo, PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); unsigned int uRateLen = WLAN_RATES_MAXLEN; memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB)); pMgmt->sNodeDBTable[0].bActive = TRUE; if (pDevice->byBBType == BB_TYPE_11B) { uRateLen = WLAN_RATES_MAXLEN_11B; } pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, uRateLen); pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, uRateLen); RATEvParseMaxRate((void *) pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE, &(pMgmt->sNodeDBTable[0].wMaxBasicRate), &(pMgmt->sNodeDBTable[0].wMaxSuppRate), &(pMgmt->sNodeDBTable[0].wSuppRate), &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate) ); memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN); pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate; pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo); pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND; // Auto rate fallback function initiation. 
// RATEbInit(pDevice); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pMgmt->sNodeDBTable[0].wTxDataRate = %d \n", pMgmt->sNodeDBTable[0].wTxDataRate); }; /*+ * * Routine Description: * Add Multicast Node content in Index 0 of KnownNodeDB * * * Return Value: * None * -*/ void BSSvAddMulticastNode(void *hDeviceContext) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); if (!pDevice->bEnableHostWEP) memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB)); memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN); pMgmt->sNodeDBTable[0].bActive = TRUE; pMgmt->sNodeDBTable[0].bPSEnable = FALSE; skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue); RATEvParseMaxRate((void *) pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE, &(pMgmt->sNodeDBTable[0].wMaxBasicRate), &(pMgmt->sNodeDBTable[0].wMaxSuppRate), &(pMgmt->sNodeDBTable[0].wSuppRate), &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate) ); pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate; pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND; }; /*+ * * Routine Description: * * * Second call back function to update Node DB info & AP link status * * * Return Value: * none. 
 * -*/
/*
 * Once-per-second timer callback (re-arms itself at the end).
 * Under pDevice->lock it: ages every node-DB entry and prunes inactive or
 * queue-clogged STAs, recomputes ERP protection / short-slot / Barker
 * preamble state (AP + 11g), drives the roaming / re-association and
 * lost-beacon state machines, and notifies wpa_supplicant via a recycled
 * skb and/or wireless events on disconnect.
 */
void BSSvSecondCallBack(void *hDeviceContext)
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
    unsigned int ii;
    PWLAN_IE_SSID pItemSSID, pCurrSSID;
    unsigned int uSleepySTACnt = 0;
    unsigned int uNonShortSlotSTACnt = 0;
    unsigned int uLongPreambleSTACnt = 0;
    viawget_wpa_header *wpahdr;

    spin_lock_irq(&pDevice->lock);

    pDevice->uAssocCount = 0;

    //Power Saving Mode Tx Burst
    if (pDevice->bEnablePSMode == TRUE) {
        pDevice->ulPSModeWaitTx++;
        /* burst window expires after two ticks (~2 s) without TX */
        if (pDevice->ulPSModeWaitTx >= 2) {
            pDevice->ulPSModeWaitTx = 0;
            pDevice->bPSModeTxBurst = FALSE;
        }
    }

    /* Re-derive ERP flags from scratch each tick; the per-node loop below
     * sets them again while any offending STA is still active. */
    pDevice->byERPFlag &= ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));

    if (pDevice->wUseProtectCntDown > 0) {
        pDevice->wUseProtectCntDown--;
    } else {
        // disable protect mode
        pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
    }

    if (pDevice->byReAssocCount > 0) {
        pDevice->byReAssocCount++;
        if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != TRUE)) {  //10 sec timeout
            printk("Re-association timeout!!!\n");
            pDevice->byReAssocCount = 0;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
            // if(pDevice->bWPASuppWextEnabled == TRUE)
            {
                union iwreq_data wrqu;
                memset(&wrqu, 0, sizeof (wrqu));
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
                /* zero BSSID event = "disassociated" to the supplicant */
                wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
            }
#endif
        } else if (pDevice->bLinkPass == TRUE)
            pDevice->byReAssocCount = 0;
    }

    /* Edge-detect ASSOC -> not-ASSOC and report a disconnect event. */
    if ((pMgmt->eCurrState != WMAC_STATE_ASSOC) &&
        (pMgmt->eLastState == WMAC_STATE_ASSOC)) {
        union iwreq_data wrqu;
        memset(&wrqu, 0, sizeof(wrqu));
        wrqu.data.flags = RT_DISCONNECTED_EVENT_FLAG;
        wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
    }
    pMgmt->eLastState = pMgmt->eCurrState;

    s_uCalculateLinkQual((void *)pDevice);

    for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {

        if (pMgmt->sNodeDBTable[ii].bActive) {
            // Increase in-activity counter
            pMgmt->sNodeDBTable[ii].uInActiveCount++;

            if (ii > 0) {
                if (pMgmt->sNodeDBTable[ii].uInActiveCount > MAX_INACTIVE_COUNT) {
                    BSSvRemoveOneNode(pDevice, ii);
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Inactive timeout [%d] sec, STA index = [%d] remove\n", MAX_INACTIVE_COUNT, ii);
                    continue;
                }

                if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {

                    pDevice->uAssocCount++;

                    // check if Non ERP exist
                    if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
                        if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
                            pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
                            uLongPreambleSTACnt++;
                        }
                        if (!pMgmt->sNodeDBTable[ii].bERPExist) {
                            pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
                            pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
                        }
                        if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
                            uNonShortSlotSTACnt++;
                    }
                }

                // check if any STA in PS mode
                if (pMgmt->sNodeDBTable[ii].bPSEnable)
                    uSleepySTACnt++;
            }

            // Rate fallback check
            if (!pDevice->bFixRate) {
                if (ii > 0) {
                    // ii = 0 for multicast node (AP & Adhoc)
                    RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
                } else {
                    // ii = 0 reserved for unicast AP node (Infra STA)
                    if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
                        RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
                }
            }

            // check if pending PS queue
            if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending \n", ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
                /* a STA that never drains its PS queue is evicted */
                if ((ii > 0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
                    BSSvRemoveOneNode(pDevice, ii);
                    DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove \n", ii);
                    continue;
                }
            }
        }
    }

    if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->byBBType == BB_TYPE_11G)) {

        // on/off protect mode
        if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
            if (!pDevice->bProtectMode) {
                MACvEnableProtectMD(pDevice);
                pDevice->bProtectMode = TRUE;
            }
        } else {
            if (pDevice->bProtectMode) {
                MACvDisableProtectMD(pDevice);
                pDevice->bProtectMode = FALSE;
            }
        }
        // on/off short slot time
        if (uNonShortSlotSTACnt > 0) {
            if (pDevice->bShortSlotTime) {
                pDevice->bShortSlotTime = FALSE;
                BBvSetShortSlotTime(pDevice);
                vUpdateIFS((void *)pDevice);
            }
        } else {
            if (!pDevice->bShortSlotTime) {
                pDevice->bShortSlotTime = TRUE;
                BBvSetShortSlotTime(pDevice);
                vUpdateIFS((void *)pDevice);
            }
        }
        // on/off barker long preamble mode
        if (uLongPreambleSTACnt > 0) {
            if (!pDevice->bBarkerPreambleMd) {
                MACvEnableBarkerPreambleMd(pDevice);
                pDevice->bBarkerPreambleMd = TRUE;
            }
        } else {
            if (pDevice->bBarkerPreambleMd) {
                MACvDisableBarkerPreambleMd(pDevice);
                pDevice->bBarkerPreambleMd = FALSE;
            }
        }
    }

    // Check if any STA in PS mode, enable DTIM multicast deliver
    if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
        if (uSleepySTACnt > 0)
            pMgmt->sNodeDBTable[0].bPSEnable = TRUE;
        else
            pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
    }

    pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
    pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;

    if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
        (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {

        if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS

            if (pDevice->bUpdateBBVGA) {
                /* s_vCheckSensitivity((void *) pDevice); */
                s_vCheckPreEDThreshold((void *) pDevice);
            }

            /* Half the lost-beacon budget gone: restore max BB sensitivity. */
            if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
                (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) {
                pDevice->byBBVGANew = pDevice->abyBBVGA[0];
                bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
            }

            if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
                /* AP beacon lost: declare the link down and start roaming. */
                pMgmt->sNodeDBTable[0].bActive = FALSE;
                pMgmt->eCurrMode = WMAC_MODE_STANDBY;
                pMgmt->eCurrState = WMAC_STATE_IDLE;
                netif_stop_queue(pDevice->dev);
                pDevice->bLinkPass = FALSE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
                pDevice->bRoaming = TRUE;
                pDevice->bIsRoaming = FALSE;
                DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
                /* let wpa supplicant know AP may disconnect */
                if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
                    wpahdr = (viawget_wpa_header *)pDevice->skb->data;
                    wpahdr->type = VIAWGET_DISASSOC_MSG;
                    wpahdr->resp_ie_len = 0;
                    wpahdr->req_ie_len = 0;
                    skb_put(pDevice->skb, sizeof(viawget_wpa_header));
                    pDevice->skb->dev = pDevice->wpadev;
                    skb_reset_mac_header(pDevice->skb);
                    pDevice->skb->pkt_type = PACKET_HOST;
                    pDevice->skb->protocol = htons(ETH_P_802_2);
                    memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
                    /* netif_rx() consumes the skb; allocate a fresh one. */
                    netif_rx(pDevice->skb);
                    pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
                }
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                {
                    union iwreq_data wrqu;
                    memset(&wrqu, 0, sizeof (wrqu));
                    wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                    PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
                    wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
                }
#endif
            }
        } else if (pItemSSID->len != 0) {
//Davidwang
            if ((pDevice->bEnableRoaming == TRUE) && (!(pMgmt->Cisco_cckm))) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bRoaming %d, !\n", pDevice->bRoaming);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming);
                if ((pDevice->bRoaming == TRUE) && (pDevice->bIsRoaming == TRUE)) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fast Roaming ...\n");
                    BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
                    bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
                    bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
                    pDevice->uAutoReConnectTime = 0;
                    pDevice->uIsroamingTime = 0;
                    pDevice->bRoaming = FALSE;

                    /* NOTE(review): pDevice->skb is used here without the
                     * NULL/bWPADEVUp guard applied in the lost-beacon path
                     * above — confirm it cannot be NULL on this path. */
                    wpahdr = (viawget_wpa_header *)pDevice->skb->data;
                    wpahdr->type = VIAWGET_CCKM_ROAM_MSG;
                    wpahdr->resp_ie_len = 0;
                    wpahdr->req_ie_len = 0;
                    skb_put(pDevice->skb, sizeof(viawget_wpa_header));
                    pDevice->skb->dev = pDevice->wpadev;
                    skb_reset_mac_header(pDevice->skb);
                    pDevice->skb->pkt_type = PACKET_HOST;
                    pDevice->skb->protocol = htons(ETH_P_802_2);
                    memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
                    netif_rx(pDevice->skb);
                    pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);

                } else if ((pDevice->bRoaming == FALSE) && (pDevice->bIsRoaming == TRUE)) {
                    pDevice->uIsroamingTime++;
                    if (pDevice->uIsroamingTime >= 20)
                        pDevice->bIsRoaming = FALSE;
                }
            } else {
                if (pDevice->uAutoReConnectTime < 10) {
                    pDevice->uAutoReConnectTime++;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
                    //network manager support need not do Roaming scan???
                    if (pDevice->bWPASuppWextEnabled == TRUE)
                        pDevice->uAutoReConnectTime = 0;
#endif
                } else {
                    //mike use old encryption status for wpa reauthen
                    if (pDevice->bWPADEVUp)
                        pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;

                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming ...\n");
                    BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
                    pMgmt->eScanType = WMAC_SCAN_ACTIVE;
                    bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
                    bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
                    pDevice->uAutoReConnectTime = 0;
                }
            }
        }
    }

    if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
        // if adhoc started which essid is NULL string, rescanning.
        if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
            if (pDevice->uAutoReConnectTime < 10) {
                pDevice->uAutoReConnectTime++;
            } else {
                DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
                pMgmt->eScanType = WMAC_SCAN_ACTIVE;
                bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
                bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
                pDevice->uAutoReConnectTime = 0;
            };
        }
        if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {

            if (pDevice->bUpdateBBVGA) {
                /* s_vCheckSensitivity((void *) pDevice); */
                s_vCheckPreEDThreshold((void *) pDevice);
            }
            if (pMgmt->sNodeDBTable[0].uInActiveCount >= ADHOC_LOST_BEACON_COUNT) {
                DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
                pMgmt->sNodeDBTable[0].uInActiveCount = 0;
                pMgmt->eCurrState = WMAC_STATE_STARTED;
                netif_stop_queue(pDevice->dev);
                pDevice->bLinkPass = FALSE;
                ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
            }
        }
    }

    if (pDevice->bLinkPass == TRUE) {
        if (netif_queue_stopped(pDevice->dev))
            netif_wake_queue(pDevice->dev);
    }

    spin_unlock_irq(&pDevice->lock);

    /* re-arm: fire again in one second */
    pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
    add_timer(&pMgmt->sTimerSecondCallback);
}

/*+
 *
 * Routine Description:
 *
 *
 *  Update Tx attemps, Tx failure counter in Node DB
 *
 *
 * Return Value:
 *    none.
 *
-*/
/*
 * Fold one TX-status report into the per-node statistics: attempts, OK/fail
 * counts per rate (following the hardware auto-fallback rate tables), and
 * retry totals.  Entry 0 is used in infrastructure-STA mode; other entries
 * are looked up by destination MAC in IBSS/AP mode.
 */
void BSSvUpdateNodeTxCounter(void *hDeviceContext,
                             PSStatCounter pStatistic,
                             BYTE byTSR,
                             BYTE byPktNO)
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
    unsigned int uNodeIndex = 0;
    BYTE         byTxRetry;
    WORD         wRate;
    WORD         wFallBackRate = RATE_1M;
    BYTE         byFallBack;
    unsigned int ii;
    PBYTE        pbyDestAddr;
    BYTE         byPktNum;
    WORD         wFIFOCtl;

    /* NOTE(review): (byPktNO & 0x0F) >> 4 is always 0, so abyTxPktInfo[0]
     * is used for every packet — this looks like a masking bug; confirm the
     * packet-number/rate packing of byPktNO against the TX descriptor
     * layout before changing. */
    byPktNum = (byPktNO & 0x0F) >> 4;
    byTxRetry = (byTSR & 0xF0) >> 4;
    wRate = (WORD) (byPktNO & 0xF0) >> 4;
    wFIFOCtl = pStatistic->abyTxPktInfo[byPktNum].wFIFOCtl;
    pbyDestAddr = (PBYTE) &( pStatistic->abyTxPktInfo[byPktNum].abyDestAddr[0]);

    if (wFIFOCtl & FIFOCTL_AUTO_FB_0) {
        byFallBack = AUTO_FB_0;
    } else if (wFIFOCtl & FIFOCTL_AUTO_FB_1) {
        byFallBack = AUTO_FB_1;
    } else {
        byFallBack = AUTO_FB_NONE;
    }

    // Only Unicast using support rates
    if (wFIFOCtl & FIFOCTL_NEEDACK) {

        if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
            pMgmt->sNodeDBTable[0].uTxAttempts += 1;
            if (!(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
                // transmit success, TxAttempts at least plus one
                pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
                /* Map the retry count through the HW fallback tables to
                 * find the rate the frame actually went out at. */
                if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
                    wFallBackRate = wRate;
                } else if (byFallBack == AUTO_FB_0) {
                    if (byTxRetry < 5)
                        wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
                    else
                        wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
                } else if (byFallBack == AUTO_FB_1) {
                    if (byTxRetry < 5)
                        wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
                    else
                        wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
                }
                pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
            } else {
                pMgmt->sNodeDBTable[0].uTxFailures++;
            }
            pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
            if (byTxRetry != 0) {
                pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE] += byTxRetry;
                /* Attribute each retry to the rate it was attempted at. */
                if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
                    pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
                } else if (byFallBack == AUTO_FB_0) {
                    for (ii = 0; ii < byTxRetry; ii++) {
                        if (ii < 5)
                            wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
                        else
                            wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
                        pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
                    }
                } else if (byFallBack == AUTO_FB_1) {
                    for (ii = 0; ii < byTxRetry; ii++) {
                        if (ii < 5)
                            wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
                        else
                            wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
                        pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
                    }
                }
            }
        }

        if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
            (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {

            /* Per-STA accounting: same scheme as the AP-node path above. */
            if (BSSbIsSTAInNodeDB((void *) pDevice, pbyDestAddr, &uNodeIndex)) {
                pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
                if (!(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
                    // transmit success, TxAttempts at least plus one
                    pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
                    if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
                        wFallBackRate = wRate;
                    } else if (byFallBack == AUTO_FB_0) {
                        if (byTxRetry < 5)
                            wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
                        else
                            wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
                    } else if (byFallBack == AUTO_FB_1) {
                        if (byTxRetry < 5)
                            wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
                        else
                            wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
                    }
                    pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
                } else {
                    pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
                }
                pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
                if (byTxRetry != 0) {
                    pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE] += byTxRetry;
                    if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
                        pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate] += byTxRetry;
                    } else if (byFallBack == AUTO_FB_0) {
                        for (ii = 0; ii < byTxRetry; ii++) {
                            if (ii < 5)
                                wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
                            else
                                wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
                            pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
                        }
                    } else if (byFallBack == AUTO_FB_1) {
                        for (ii = 0; ii < byTxRetry; ii++) {
                            if (ii < 5)
                                wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
                            else
                                wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
                            pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
                        }
                    }
                }
            }
        }
    }
}

/*+
 *
 * Routine Description:
 *    Clear Nodes & skb in DB Table
 *
 *
 * Parameters:
 *  In:
 *      hDeviceContext        - The adapter context.
 *      uStartIndex           - starting index
 *  Out:
 *      none
 *
 * Return Value:
 *    None.
 *
-*/
/*
 * Drain the PS queue of, and zero, every active node-DB entry from
 * uStartIndex upward.
 */
void BSSvClearNodeDBTable(void *hDeviceContext,
                          unsigned int uStartIndex)
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
    struct sk_buff *skb;
    unsigned int ii;

    for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
        if (pMgmt->sNodeDBTable[ii].bActive) {
            // check if sTxPSQueue has been initial
            if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
                while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS skb != NULL %d\n", ii);
                    dev_kfree_skb(skb);
                }
            }
            memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
        }
    }
};

/*
 * Track the current BSS's average RSSI and, when it crosses one of the
 * configured dBm thresholds for BB_VGA_CHANGE_THRESHOLD consecutive calls,
 * schedule a baseband VGA (sensitivity) change.
 */
void s_vCheckSensitivity(void *hDeviceContext)
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PKnownBSS    pBSSList = NULL;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
    int          ii;

    if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
        ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
        pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
        if (pBSSList != NULL) {
            /* Update BB register if RSSI is too strong */
            signed long LocalldBmAverage = 0;
            signed long uNumofdBm = 0;
            /* average only over the non-zero window entries */
            for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
                if (pBSSList->ldBmAverage[ii] != 0) {
                    uNumofdBm++;
                    LocalldBmAverage += pBSSList->ldBmAverage[ii];
                }
            }
            if (uNumofdBm > 0) {
                LocalldBmAverage = LocalldBmAverage/uNumofdBm;
                /* pick the first VGA level whose threshold exceeds the average */
                for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LocalldBmAverage:%ld, %ld %02x\n", LocalldBmAverage, pDevice->ldBmThreshold[ii], pDevice->abyBBVGA[ii]);
                    if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
                        pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
                        break;
                    }
                }
                /* debounce: require the new level to persist before switching */
                if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
                    pDevice->uBBVGADiffCount++;
                    if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
                        bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
                } else {
                    pDevice->uBBVGADiffCount = 0;
                }
            }
        }
    }
}

/*
 * Derive LinkQuality (0..100) and SignalStren (0..100) from the last
 * interval's TX/RX success ratios and the current RSSI, then reset the
 * interval counters.  Ratios are weighted 4000 (RSSI) + 4000 (TX) +
 * 2000 (RX) before scaling down by 100.
 */
void s_uCalculateLinkQual(void *hDeviceContext)
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    unsigned long TxOkRatio, TxCnt;
    unsigned long RxOkRatio, RxCnt;
    unsigned long RssiRatio;
    long ldBm;

    TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
            pDevice->scStatistic.TxRetryOkCount +
            pDevice->scStatistic.TxFailCount;
    RxCnt = pDevice->scStatistic.RxFcsErrCnt +
            pDevice->scStatistic.RxOkCnt;
    /* too few samples: assume perfect ratio (also avoids divide-by-zero) */
    TxOkRatio = (TxCnt < 6) ? 4000:((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
    RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
    //decide link quality
    if (pDevice->bLinkPass != TRUE) {
        pDevice->scStatistic.LinkQuality = 0;
        pDevice->scStatistic.SignalStren = 0;
    } else {
        RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
        /* map -50 dBm (or better) -> 4000, -90 dBm (or worse) -> 0 */
        if (-ldBm < 50) {
            RssiRatio = 4000;
        } else if (-ldBm > 90) {
            RssiRatio = 0;
        } else {
            RssiRatio = (40-(-ldBm-50))*4000/40;
        }
        pDevice->scStatistic.SignalStren = RssiRatio/40;
        pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
    }
    /* start a fresh measurement interval */
    pDevice->scStatistic.RxFcsErrCnt = 0;
    pDevice->scStatistic.RxOkCnt = 0;
    pDevice->scStatistic.TxFailCount = 0;
    pDevice->scStatistic.TxNoRetryOkCount = 0;
    pDevice->scStatistic.TxRetryOkCount = 0;
}

/* Clear the bSelected mark on every known-BSS entry. */
void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
    unsigned int ii;

    for (ii = 0; ii < MAX_BSS_NUM; ii++)
        pMgmt->sBSSList[ii].bSelected = FALSE;
}

/*
 * When associated (or joined in adhoc), derive the pre-ED RSSI threshold
 * from the current BSS's average dBm (two's-complement magnitude) and push
 * it to the baseband.
 */
void s_vCheckPreEDThreshold(void *hDeviceContext)
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PKnownBSS    pBSSList = NULL;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);

    if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
        ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
        pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
        if (pBSSList != NULL) {
            /* ~x + 1 == -x: store |ldBmAverRange| as the threshold byte */
            pDevice->byBBPreEDRSSI = (BYTE) (~(pBSSList->ldBmAverRange) + 1);
            BBvUpdatePreEDThreshold(pDevice, FALSE);
        }
    }
}
gpl-2.0
MoKee/android_kernel_zte_x9180
drivers/s390/block/dasd_erp.c
8181
4940
/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 * Generic error recovery (ERP) helpers for the s390 DASD driver:
 * allocation/freeing of ERP requests from the per-device chunk pool,
 * the default retry action/postaction, and sense-data logging hooks.
 */

#define KMSG_COMPONENT "dasd"

#include <linux/ctype.h>
#include <linux/init.h>

#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/uaccess.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd_erp:"

#include "dasd_int.h"

/*
 * Allocate a dasd_ccw_req for error recovery from the device's
 * dedicated erp_chunks pool (not the general request pool), with room
 * for @cplength CCWs and @datasize bytes of payload behind the struct.
 *
 * Returns the zero-initialized request with DASD_CQR_FLAGS_USE_ERP set
 * and a device reference taken, or ERR_PTR(-ENOMEM) if the pool is
 * exhausted. @magic (4 chars) is stored EBCDIC-converted in cqr->magic.
 */
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
		       struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	/* Round the struct size up to an 8-byte boundary so the CCW
	 * area that follows is doubleword aligned (channel requirement). */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->erp_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	INIT_LIST_HEAD(&cqr->devlist);
	INIT_LIST_HEAD(&cqr->blocklist);
	/* CCW area and data area live directly behind the (aligned) struct */
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	/* copies exactly 4 bytes of the magic tag, then converts to EBCDIC */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Return an ERP request to the device's erp_chunks pool and drop the
 * device reference taken in dasd_alloc_erp_request().
 */
void
dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->erp_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	atomic_dec(&device->ref_count);
}

/*
 * dasd_default_erp_action just retries the current cqr
 *
 * While retries remain, the request is re-queued (status DASD_CQR_FILLED)
 * with its logical path mask reset to the device's operational paths;
 * once retries are exhausted it is marked DASD_CQR_FAILED and its stop
 * clock is taken. Returns @cqr in either case.
 */
struct dasd_ccw_req *
dasd_default_erp_action(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	device = cqr->startdev;

	/* just retry - there is nothing to save ... I got no sense data.... */
	if (cqr->retries > 0) {
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "default ERP called (%i retries left)",
			      cqr->retries);
		/* path-verification requests keep their own lpm */
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
			cqr->lpm = device->path_data.opm;
		cqr->status = DASD_CQR_FILLED;
	} else {
		pr_err("%s: default ERP has run out of retries and failed\n",
		       dev_name(&device->cdev->dev));
		cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
	}
	return cqr;
}				/* end dasd_default_erp_action */

/*
 * DESCRIPTION
 *   Frees all ERPs of the current ERP Chain and set the status
 *   of the original CQR either to DASD_CQR_DONE if ERP was successful
 *   or to DASD_CQR_FAILED if ERP was NOT successful.
 *   NOTE: This function is only called if no discipline postaction
 *	   is available
 *
 * PARAMETER
 *   erp		current erp_head
 *
 * RETURN VALUES
 *   cqr		pointer to the original CQR
 */
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
	int success;

	BUG_ON(cqr->refers == NULL || cqr->function == NULL);

	/* capture the ERP head's outcome before the chain is torn down */
	success = cqr->status == DASD_CQR_DONE;

	/* free all ERPs - but NOT the original cqr */
	while (cqr->refers != NULL) {
		struct dasd_ccw_req *refers;

		refers = cqr->refers;
		/* remove the request from the block queue */
		list_del(&cqr->blocklist);
		/* free the finished erp request */
		dasd_free_erp_request(cqr, cqr->memdev);
		cqr = refers;
	}

	/* set corresponding status to original cqr */
	if (success)
		cqr->status = DASD_CQR_DONE;
	else {
		cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
	}

	return cqr;
}				/* end default_erp_postaction */

/*
 * Dump sense data for @cqr via the discipline's dump_sense hook, if the
 * discipline provides one (console/log output).
 */
void
dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_device *device;

	device = cqr->startdev;
	/* dump sense data */
	if (device->discipline && device->discipline->dump_sense)
		device->discipline->dump_sense(device, cqr, irb);
}

/*
 * As above, but routed to the s390 debug feature (dbf) instead of the
 * console, via the discipline's dump_sense_dbf hook.
 */
void
dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_device *device;

	device = cqr->startdev;
	/* dump sense data to s390 debugfeature*/
	if (device->discipline && device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "log");
}
EXPORT_SYMBOL(dasd_log_sense_dbf);

EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);
gpl-2.0
cnexus/NexTKernel-d2spr
drivers/i2c/busses/i2c-nforce2-s4985.c
10229
7464
/*
 * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard
 *
 * Copyright (C) 2008 Jean Delvare <khali@linux-fr.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * We select the channels by sending commands to the Philips
 * PCA9556 chip at I2C address 0x18. The main adapter is used for
 * the non-multiplexed part of the bus, and 4 virtual adapters
 * are defined for the multiplexed addresses: 0x50-0x53 (memory
 * module EEPROM) located on channels 1-4. We define one virtual
 * adapter per CPU, which corresponds to one multiplexed channel:
 *   CPU0: virtual adapter 1, channel 1
 *   CPU1: virtual adapter 2, channel 2
 *   CPU2: virtual adapter 3, channel 3
 *   CPU3: virtual adapter 4, channel 4
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mutex.h>

/* provided by the i2c-nforce2 bus driver we piggy-back on */
extern struct i2c_adapter *nforce2_smbus;

static struct i2c_adapter *s4985_adapter;
static struct i2c_algorithm *s4985_algo;

/* Wrapper access functions for multiplexed SMBus */
static DEFINE_MUTEX(nforce2_lock);

/*
 * Access function for the main (non-multiplexed) part of the bus.
 * Multiplexed addresses (0x50-0x53, 0x30-0x33) and the PCA9556 itself
 * (0x18) are refused here so they can only be reached through the
 * virtual channel adapters below.
 */
static s32 nforce2_access_virt0(struct i2c_adapter *adap, u16 addr,
				unsigned short flags, char read_write,
				u8 command, int size,
				union i2c_smbus_data *data)
{
	int error;

	/* We exclude the multiplexed addresses */
	if ((addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30
	 || addr == 0x18)
		return -ENXIO;

	mutex_lock(&nforce2_lock);
	error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write,
						command, size, data);
	mutex_unlock(&nforce2_lock);

	return error;
}

/* We remember the last used channels combination so as to only switch
   channels when it is really needed. This greatly reduces the SMBus
   overhead, but also assumes that nobody will be writing to the PCA9556
   in our back. */
static u8 last_channels;

/*
 * Common access path for the multiplexed addresses: select @channels on
 * the PCA9556 (output port register 0x01) if it is not already the
 * active combination, then forward the transfer to the real bus driver.
 * Non-multiplexed addresses are refused. Serialized by nforce2_lock,
 * which also protects last_channels.
 */
static inline s32 nforce2_access_channel(struct i2c_adapter *adap, u16 addr,
					 unsigned short flags, char read_write,
					 u8 command, int size,
					 union i2c_smbus_data *data,
					 u8 channels)
{
	int error;

	/* We exclude the non-multiplexed addresses */
	if ((addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30)
		return -ENXIO;

	mutex_lock(&nforce2_lock);
	if (last_channels != channels) {
		union i2c_smbus_data mplxdata;
		mplxdata.byte = channels;

		error = nforce2_smbus->algo->smbus_xfer(adap, 0x18, 0,
							I2C_SMBUS_WRITE, 0x01,
							I2C_SMBUS_BYTE_DATA,
							&mplxdata);
		if (error)
			goto UNLOCK;
		last_channels = channels;
	}
	error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write,
						command, size, data);

UNLOCK:
	mutex_unlock(&nforce2_lock);
	return error;
}

static s32 nforce2_access_virt1(struct i2c_adapter *adap, u16 addr,
				unsigned short flags, char read_write,
				u8 command, int size,
				union i2c_smbus_data *data)
{
	/* CPU0: channel 1 enabled */
	return nforce2_access_channel(adap, addr, flags, read_write, command,
				      size, data, 0x02);
}

static s32 nforce2_access_virt2(struct i2c_adapter *adap, u16 addr,
				unsigned short flags, char read_write,
				u8 command, int size,
				union i2c_smbus_data *data)
{
	/* CPU1: channel 2 enabled */
	return nforce2_access_channel(adap, addr, flags, read_write, command,
				      size, data, 0x04);
}

static s32 nforce2_access_virt3(struct i2c_adapter *adap, u16 addr,
				unsigned short flags, char read_write,
				u8 command, int size,
				union i2c_smbus_data *data)
{
	/* CPU2: channel 3 enabled */
	return nforce2_access_channel(adap, addr, flags, read_write, command,
				      size, data, 0x08);
}

static s32 nforce2_access_virt4(struct i2c_adapter *adap, u16 addr,
				unsigned short flags, char read_write,
				u8 command, int size,
				union i2c_smbus_data *data)
{
	/* CPU3: channel 4 enabled */
	return nforce2_access_channel(adap, addr, flags, read_write, command,
				      size, data, 0x10);
}

/*
 * Replace the physical nForce2 SMBus adapter with five virtual ones:
 * adapter 0 for the non-multiplexed bus segment, adapters 1-4 for the
 * four PCA9556-selected channels. On any failure the physical adapter
 * is restored before returning the error.
 */
static int __init nforce2_s4985_init(void)
{
	int i, error;
	union i2c_smbus_data ioconfig;

	if (!nforce2_smbus)
		return -ENODEV;

	/* Configure the PCA9556 multiplexer */
	ioconfig.byte = 0x00;	/* All I/O to output mode */
	error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
			       I2C_SMBUS_BYTE_DATA, &ioconfig);
	if (error) {
		dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n");
		error = -EIO;
		goto ERROR0;
	}

	/* Unregister physical bus */
	error = i2c_del_adapter(nforce2_smbus);
	if (error) {
		dev_err(&nforce2_smbus->dev, "Physical bus removal failed\n");
		goto ERROR0;
	}

	printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n");
	/* Define the 5 virtual adapters and algorithms structures */
	s4985_adapter = kzalloc(5 * sizeof(struct i2c_adapter), GFP_KERNEL);
	if (!s4985_adapter) {
		error = -ENOMEM;
		goto ERROR1;
	}
	s4985_algo = kzalloc(5 * sizeof(struct i2c_algorithm), GFP_KERNEL);
	if (!s4985_algo) {
		error = -ENOMEM;
		goto ERROR2;
	}

	/* Fill in the new structures */
	s4985_algo[0] = *(nforce2_smbus->algo);
	s4985_algo[0].smbus_xfer = nforce2_access_virt0;
	s4985_adapter[0] = *nforce2_smbus;
	s4985_adapter[0].algo = s4985_algo;
	s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
	for (i = 1; i < 5; i++) {
		s4985_algo[i] = *(nforce2_smbus->algo);
		s4985_adapter[i] = *nforce2_smbus;
		snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
			 "SMBus nForce2 adapter (CPU%d)", i - 1);
		s4985_adapter[i].algo = s4985_algo + i;
		s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
	}
	s4985_algo[1].smbus_xfer = nforce2_access_virt1;
	s4985_algo[2].smbus_xfer = nforce2_access_virt2;
	s4985_algo[3].smbus_xfer = nforce2_access_virt3;
	s4985_algo[4].smbus_xfer = nforce2_access_virt4;

	/* Register virtual adapters */
	for (i = 0; i < 5; i++) {
		error = i2c_add_adapter(s4985_adapter + i);
		if (error) {
			printk(KERN_ERR "i2c-nforce2-s4985: "
			       "Virtual adapter %d registration "
			       "failed, module not inserted\n", i);
			/* roll back the adapters registered so far */
			for (i--; i >= 0; i--)
				i2c_del_adapter(s4985_adapter + i);
			goto ERROR3;
		}
	}

	return 0;

ERROR3:
	kfree(s4985_algo);
	s4985_algo = NULL;
ERROR2:
	kfree(s4985_adapter);
	s4985_adapter = NULL;
ERROR1:
	/* Restore physical bus */
	i2c_add_adapter(nforce2_smbus);
ERROR0:
	return error;
}

/*
 * Tear down the virtual adapters (if init succeeded) and re-register
 * the physical nForce2 SMBus adapter.
 */
static void __exit nforce2_s4985_exit(void)
{
	if (s4985_adapter) {
		int i;

		for (i = 0; i < 5; i++)
			i2c_del_adapter(s4985_adapter+i);
		kfree(s4985_adapter);
		s4985_adapter = NULL;
	}
	/* free(NULL) is a no-op, so this is safe even if init failed early */
	kfree(s4985_algo);
	s4985_algo = NULL;

	/* Restore physical bus */
	if (i2c_add_adapter(nforce2_smbus))
		printk(KERN_ERR "i2c-nforce2-s4985: "
		       "Physical bus restoration failed\n");
}

MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("S4985 SMBus multiplexing");
MODULE_LICENSE("GPL");

module_init(nforce2_s4985_init);
module_exit(nforce2_s4985_exit);
gpl-2.0
TheNameIsNigel/android_kernel_carbon_msm8928
fs/ncpfs/ncplib_kernel.c
10997
33287
/* * ncplib_kernel.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified for big endian by J.F. Chadima and David S. Miller * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * Modified 1999 Wolfram Pienkoss for NLS * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info * */ #include "ncp_fs.h" static inline void assert_server_locked(struct ncp_server *server) { if (server->lock == 0) { DPRINTK("ncpfs: server not locked!\n"); } } static void ncp_add_byte(struct ncp_server *server, __u8 x) { assert_server_locked(server); *(__u8 *) (&(server->packet[server->current_size])) = x; server->current_size += 1; return; } static void ncp_add_word(struct ncp_server *server, __le16 x) { assert_server_locked(server); put_unaligned(x, (__le16 *) (&(server->packet[server->current_size]))); server->current_size += 2; return; } static void ncp_add_be16(struct ncp_server *server, __u16 x) { assert_server_locked(server); put_unaligned(cpu_to_be16(x), (__be16 *) (&(server->packet[server->current_size]))); server->current_size += 2; } static void ncp_add_dword(struct ncp_server *server, __le32 x) { assert_server_locked(server); put_unaligned(x, (__le32 *) (&(server->packet[server->current_size]))); server->current_size += 4; return; } static void ncp_add_be32(struct ncp_server *server, __u32 x) { assert_server_locked(server); put_unaligned(cpu_to_be32(x), (__be32 *)(&(server->packet[server->current_size]))); server->current_size += 4; } static inline void ncp_add_dword_lh(struct ncp_server *server, __u32 x) { ncp_add_dword(server, cpu_to_le32(x)); } static void ncp_add_mem(struct ncp_server *server, const void *source, int size) { assert_server_locked(server); memcpy(&(server->packet[server->current_size]), source, size); server->current_size += size; return; } static void ncp_add_pstring(struct ncp_server *server, const char *s) { int len = strlen(s); assert_server_locked(server); if (len > 255) { DPRINTK("ncpfs: string too long: 
%s\n", s); len = 255; } ncp_add_byte(server, len); ncp_add_mem(server, s, len); return; } static inline void ncp_init_request(struct ncp_server *server) { ncp_lock_server(server); server->current_size = sizeof(struct ncp_request_header); server->has_subfunction = 0; } static inline void ncp_init_request_s(struct ncp_server *server, int subfunction) { ncp_lock_server(server); server->current_size = sizeof(struct ncp_request_header) + 2; ncp_add_byte(server, subfunction); server->has_subfunction = 1; } static inline char * ncp_reply_data(struct ncp_server *server, int offset) { return &(server->packet[sizeof(struct ncp_reply_header) + offset]); } static inline u8 BVAL(const void *data) { return *(const u8 *)data; } static u8 ncp_reply_byte(struct ncp_server *server, int offset) { return *(const u8 *)ncp_reply_data(server, offset); } static inline u16 WVAL_LH(const void *data) { return get_unaligned_le16(data); } static u16 ncp_reply_le16(struct ncp_server *server, int offset) { return get_unaligned_le16(ncp_reply_data(server, offset)); } static u16 ncp_reply_be16(struct ncp_server *server, int offset) { return get_unaligned_be16(ncp_reply_data(server, offset)); } static inline u32 DVAL_LH(const void *data) { return get_unaligned_le32(data); } static __le32 ncp_reply_dword(struct ncp_server *server, int offset) { return get_unaligned((__le32 *)ncp_reply_data(server, offset)); } static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) { return le32_to_cpu(ncp_reply_dword(server, offset)); } int ncp_negotiate_buffersize(struct ncp_server *server, int size, int *target) { int result; ncp_init_request(server); ncp_add_be16(server, size); if ((result = ncp_request(server, 33)) != 0) { ncp_unlock_server(server); return result; } *target = min_t(unsigned int, ncp_reply_be16(server, 0), size); ncp_unlock_server(server); return 0; } /* options: * bit 0 ipx checksum * bit 1 packet signing */ int ncp_negotiate_size_and_options(struct ncp_server *server, int 
size, int options, int *ret_size, int *ret_options) { int result; /* there is minimum */ if (size < NCP_BLOCK_SIZE) size = NCP_BLOCK_SIZE; ncp_init_request(server); ncp_add_be16(server, size); ncp_add_byte(server, options); if ((result = ncp_request(server, 0x61)) != 0) { ncp_unlock_server(server); return result; } /* NCP over UDP returns 0 (!!!) */ result = ncp_reply_be16(server, 0); if (result >= NCP_BLOCK_SIZE) size = min(result, size); *ret_size = size; *ret_options = ncp_reply_byte(server, 4); ncp_unlock_server(server); return 0; } int ncp_get_volume_info_with_number(struct ncp_server* server, int n, struct ncp_volume_info* target) { int result; int len; ncp_init_request_s(server, 44); ncp_add_byte(server, n); if ((result = ncp_request(server, 22)) != 0) { goto out; } target->total_blocks = ncp_reply_dword_lh(server, 0); target->free_blocks = ncp_reply_dword_lh(server, 4); target->purgeable_blocks = ncp_reply_dword_lh(server, 8); target->not_yet_purgeable_blocks = ncp_reply_dword_lh(server, 12); target->total_dir_entries = ncp_reply_dword_lh(server, 16); target->available_dir_entries = ncp_reply_dword_lh(server, 20); target->sectors_per_block = ncp_reply_byte(server, 28); memset(&(target->volume_name), 0, sizeof(target->volume_name)); result = -EIO; len = ncp_reply_byte(server, 29); if (len > NCP_VOLNAME_LEN) { DPRINTK("ncpfs: volume name too long: %d\n", len); goto out; } memcpy(&(target->volume_name), ncp_reply_data(server, 30), len); result = 0; out: ncp_unlock_server(server); return result; } int ncp_get_directory_info(struct ncp_server* server, __u8 n, struct ncp_volume_info* target) { int result; int len; ncp_init_request_s(server, 45); ncp_add_byte(server, n); if ((result = ncp_request(server, 22)) != 0) { goto out; } target->total_blocks = ncp_reply_dword_lh(server, 0); target->free_blocks = ncp_reply_dword_lh(server, 4); target->purgeable_blocks = 0; target->not_yet_purgeable_blocks = 0; target->total_dir_entries = ncp_reply_dword_lh(server, 8); 
target->available_dir_entries = ncp_reply_dword_lh(server, 12); target->sectors_per_block = ncp_reply_byte(server, 20); memset(&(target->volume_name), 0, sizeof(target->volume_name)); result = -EIO; len = ncp_reply_byte(server, 21); if (len > NCP_VOLNAME_LEN) { DPRINTK("ncpfs: volume name too long: %d\n", len); goto out; } memcpy(&(target->volume_name), ncp_reply_data(server, 22), len); result = 0; out: ncp_unlock_server(server); return result; } int ncp_close_file(struct ncp_server *server, const char *file_id) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); result = ncp_request(server, 66); ncp_unlock_server(server); return result; } int ncp_make_closed(struct inode *inode) { int err; err = 0; mutex_lock(&NCP_FINFO(inode)->open_mutex); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); if (!err) PPRINTK("ncp_make_closed: volnum=%d, dirent=%u, error=%d\n", NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } mutex_unlock(&NCP_FINFO(inode)->open_mutex); return err; } static void ncp_add_handle_path(struct ncp_server *server, __u8 vol_num, __le32 dir_base, int have_dir_base, const char *path) { ncp_add_byte(server, vol_num); ncp_add_dword(server, dir_base); if (have_dir_base != 0) { ncp_add_byte(server, 1); /* dir_base */ } else { ncp_add_byte(server, 0xff); /* no handle */ } if (path != NULL) { ncp_add_byte(server, 1); /* 1 component */ ncp_add_pstring(server, path); } else { ncp_add_byte(server, 0); } } int ncp_dirhandle_alloc(struct ncp_server* server, __u8 volnum, __le32 dirent, __u8* dirhandle) { int result; ncp_init_request(server); ncp_add_byte(server, 12); /* subfunction */ ncp_add_byte(server, NW_NS_DOS); ncp_add_byte(server, 0); ncp_add_word(server, 0); ncp_add_handle_path(server, volnum, dirent, 1, NULL); if ((result = ncp_request(server, 87)) == 0) { *dirhandle = 
ncp_reply_byte(server, 0); } ncp_unlock_server(server); return result; } int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) { int result; ncp_init_request_s(server, 20); ncp_add_byte(server, dirhandle); result = ncp_request(server, 22); ncp_unlock_server(server); return result; } void ncp_extract_file_info(const void *structure, struct nw_info_struct *target) { const __u8 *name_len; const int info_struct_size = offsetof(struct nw_info_struct, nameLen); memcpy(target, structure, info_struct_size); name_len = structure + info_struct_size; target->nameLen = *name_len; memcpy(target->entryName, name_len + 1, *name_len); target->entryName[*name_len] = '\0'; target->volNumber = le32_to_cpu(target->volNumber); return; } #ifdef CONFIG_NCPFS_NFS_NS static inline void ncp_extract_nfs_info(const unsigned char *structure, struct nw_nfs_info *target) { target->mode = DVAL_LH(structure); target->rdev = DVAL_LH(structure + 8); } #endif int ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target) { int result = 0; #ifdef CONFIG_NCPFS_NFS_NS __u32 volnum = target->volNumber; if (ncp_is_nfs_extras(server, volnum)) { ncp_init_request(server); ncp_add_byte(server, 19); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, NW_NS_NFS); ncp_add_byte(server, 0); ncp_add_byte(server, volnum); ncp_add_dword(server, target->dirEntNum); /* We must retrieve both nlinks and rdev, otherwise some server versions report zeroes instead of valid data */ ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV); if ((result = ncp_request(server, 87)) == 0) { ncp_extract_nfs_info(ncp_reply_data(server, 0), &target->nfs); DPRINTK(KERN_DEBUG "ncp_obtain_nfs_info: (%s) mode=0%o, rdev=0x%x\n", target->entryName, target->nfs.mode, target->nfs.rdev); } else { target->nfs.mode = 0; target->nfs.rdev = 0; } ncp_unlock_server(server); } else #endif { target->nfs.mode = 0; target->nfs.rdev = 0; } return result; } /* * 
Returns information for a (one-component) name relative to * the specified directory. */ int ncp_obtain_info(struct ncp_server *server, struct inode *dir, const char *path, struct nw_info_struct *target) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; if (target == NULL) { printk(KERN_ERR "ncp_obtain_info: invalid call\n"); return -EINVAL; } ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, server->name_space[volnum]); /* N.B. twice ?? */ ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_ALL); ncp_add_handle_path(server, volnum, dirent, 1, path); if ((result = ncp_request(server, 87)) != 0) goto out; ncp_extract_file_info(ncp_reply_data(server, 0), target); ncp_unlock_server(server); result = ncp_obtain_nfs_info(server, target); return result; out: ncp_unlock_server(server); return result; } #ifdef CONFIG_NCPFS_NFS_NS static int ncp_obtain_DOS_dir_base(struct ncp_server *server, __u8 ns, __u8 volnum, __le32 dirent, const char *path, /* At most 1 component */ __le32 *DOS_dir_base) { int result; ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, ns); ncp_add_byte(server, ns); ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_DIRECTORY); ncp_add_handle_path(server, volnum, dirent, 1, path); if ((result = ncp_request(server, 87)) == 0) { if (DOS_dir_base) *DOS_dir_base=ncp_reply_dword(server, 0x34); } ncp_unlock_server(server); return result; } #endif /* CONFIG_NCPFS_NFS_NS */ static inline int ncp_get_known_namespace(struct ncp_server *server, __u8 volume) { #if defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) int result; __u8 *namespace; __u16 no_namespaces; ncp_init_request(server); ncp_add_byte(server, 24); /* Subfunction: Get Name Spaces Loaded */ ncp_add_word(server, 0); ncp_add_byte(server, volume); if 
((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return NW_NS_DOS; /* not result ?? */ } result = NW_NS_DOS; no_namespaces = ncp_reply_le16(server, 0); namespace = ncp_reply_data(server, 2); while (no_namespaces > 0) { DPRINTK("get_namespaces: found %d on %d\n", *namespace, volume); #ifdef CONFIG_NCPFS_NFS_NS if ((*namespace == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS)) { result = NW_NS_NFS; break; } #endif /* CONFIG_NCPFS_NFS_NS */ #ifdef CONFIG_NCPFS_OS2_NS if ((*namespace == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2)) { result = NW_NS_OS2; } #endif /* CONFIG_NCPFS_OS2_NS */ namespace += 1; no_namespaces -= 1; } ncp_unlock_server(server); return result; #else /* neither OS2 nor NFS - only DOS */ return NW_NS_DOS; #endif /* defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) */ } int ncp_update_known_namespace(struct ncp_server *server, __u8 volume, int *ret_ns) { int ns = ncp_get_known_namespace(server, volume); if (ret_ns) *ret_ns = ns; DPRINTK("lookup_vol: namespace[%d] = %d\n", volume, server->name_space[volume]); if (server->name_space[volume] == ns) return 0; server->name_space[volume] = ns; return 1; } static int ncp_ObtainSpecificDirBase(struct ncp_server *server, __u8 nsSrc, __u8 nsDst, __u8 vol_num, __le32 dir_base, const char *path, /* At most 1 component */ __le32 *dirEntNum, __le32 *DosDirNum) { int result; ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, nsSrc); ncp_add_byte(server, nsDst); ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_ALL); ncp_add_handle_path(server, vol_num, dir_base, 1, path); if ((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return result; } if (dirEntNum) *dirEntNum = ncp_reply_dword(server, 0x30); if (DosDirNum) *DosDirNum = ncp_reply_dword(server, 0x34); ncp_unlock_server(server); return 0; } int ncp_mount_subdir(struct ncp_server *server, __u8 volNumber, __u8 srcNS, __le32 
dirEntNum, __u32* volume, __le32* newDirEnt, __le32* newDosEnt) { int dstNS; int result; ncp_update_known_namespace(server, volNumber, &dstNS); if ((result = ncp_ObtainSpecificDirBase(server, srcNS, dstNS, volNumber, dirEntNum, NULL, newDirEnt, newDosEnt)) != 0) { return result; } *volume = volNumber; server->m.mounted_vol[1] = 0; server->m.mounted_vol[0] = 'X'; return 0; } int ncp_get_volume_root(struct ncp_server *server, const char *volname, __u32* volume, __le32* dirent, __le32* dosdirent) { int result; DPRINTK("ncp_get_volume_root: looking up vol %s\n", volname); ncp_init_request(server); ncp_add_byte(server, 22); /* Subfunction: Generate dir handle */ ncp_add_byte(server, 0); /* DOS namespace */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* faked volume number */ ncp_add_dword(server, 0); /* faked dir_base */ ncp_add_byte(server, 0xff); /* Don't have a dir_base */ ncp_add_byte(server, 1); /* 1 path component */ ncp_add_pstring(server, volname); if ((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return result; } *dirent = *dosdirent = ncp_reply_dword(server, 4); *volume = ncp_reply_byte(server, 8); ncp_unlock_server(server); return 0; } int ncp_lookup_volume(struct ncp_server *server, const char *volname, struct nw_info_struct *target) { int result; memset(target, 0, sizeof(*target)); result = ncp_get_volume_root(server, volname, &target->volNumber, &target->dirEntNum, &target->DosDirNum); if (result) { return result; } ncp_update_known_namespace(server, target->volNumber, NULL); target->nameLen = strlen(volname); memcpy(target->entryName, volname, target->nameLen+1); target->attributes = aDIR; /* set dates to Jan 1, 1986 00:00 */ target->creationTime = target->modifyTime = cpu_to_le16(0x0000); target->creationDate = target->modifyDate = target->lastAccessDate = cpu_to_le16(0x0C21); target->nfs.mode = 0; return 0; } int 
ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *server, struct inode *dir, const char *path, __le32 info_mask, const struct nw_modify_dos_info *info) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; ncp_init_request(server); ncp_add_byte(server, 7); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, 0); /* reserved */ ncp_add_word(server, cpu_to_le16(0x8006)); /* search attribs: all */ ncp_add_dword(server, info_mask); ncp_add_mem(server, info, sizeof(*info)); ncp_add_handle_path(server, volnum, dirent, 1, path); result = ncp_request(server, 87); ncp_unlock_server(server); return result; } int ncp_modify_file_or_subdir_dos_info(struct ncp_server *server, struct inode *dir, __le32 info_mask, const struct nw_modify_dos_info *info) { return ncp_modify_file_or_subdir_dos_info_path(server, dir, NULL, info_mask, info); } #ifdef CONFIG_NCPFS_NFS_NS int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent, __u32 mode, __u32 rdev) { int result = 0; ncp_init_request(server); if (server->name_space[volnum] == NW_NS_NFS) { ncp_add_byte(server, 25); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, NW_NS_NFS); ncp_add_byte(server, volnum); ncp_add_dword(server, dirent); /* we must always operate on both nlinks and rdev, otherwise rdev is not set */ ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV); ncp_add_dword_lh(server, mode); ncp_add_dword_lh(server, 1); /* nlinks */ ncp_add_dword_lh(server, rdev); result = ncp_request(server, 87); } ncp_unlock_server(server); return result; } #endif static int ncp_DeleteNSEntry(struct ncp_server *server, __u8 have_dir_base, __u8 volnum, __le32 dirent, const char* name, __u8 ns, __le16 attr) { int result; ncp_init_request(server); ncp_add_byte(server, 8); /* subfunction */ ncp_add_byte(server, ns); ncp_add_byte(server, 0); /* reserved */ ncp_add_word(server, 
attr); /* search attribs: all */ ncp_add_handle_path(server, volnum, dirent, have_dir_base, name); result = ncp_request(server, 87); ncp_unlock_server(server); return result; } int ncp_del_file_or_subdir2(struct ncp_server *server, struct dentry *dentry) { struct inode *inode = dentry->d_inode; __u8 volnum; __le32 dirent; if (!inode) { return 0xFF; /* Any error */ } volnum = NCP_FINFO(inode)->volNumber; dirent = NCP_FINFO(inode)->DosDirNum; return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL, NW_NS_DOS, cpu_to_le16(0x8006)); } int ncp_del_file_or_subdir(struct ncp_server *server, struct inode *dir, const char *name) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int name_space; name_space = server->name_space[volnum]; #ifdef CONFIG_NCPFS_NFS_NS if (name_space == NW_NS_NFS) { int result; result=ncp_obtain_DOS_dir_base(server, name_space, volnum, dirent, name, &dirent); if (result) return result; name = NULL; name_space = NW_NS_DOS; } #endif /* CONFIG_NCPFS_NFS_NS */ return ncp_DeleteNSEntry(server, 1, volnum, dirent, name, name_space, cpu_to_le16(0x8006)); } static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6]) { __le16 *dest = (__le16 *) ret; dest[1] = cpu_to_le16(v0); dest[2] = cpu_to_le16(v1); dest[0] = cpu_to_le16(v0 + 1); return; } /* If both dir and name are NULL, then in target there's already a looked-up entry that wants to be opened. 
*/ int ncp_open_create_file_or_subdir(struct ncp_server *server, struct inode *dir, const char *name, int open_create_mode, __le32 create_attributes, __le16 desired_acc_rights, struct ncp_entry_info *target) { __le16 search_attribs = cpu_to_le16(0x0006); __u8 volnum; __le32 dirent; int result; volnum = NCP_FINFO(dir)->volNumber; dirent = NCP_FINFO(dir)->dirEntNum; if ((create_attributes & aDIR) != 0) { search_attribs |= cpu_to_le16(0x8000); } ncp_init_request(server); ncp_add_byte(server, 1); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, open_create_mode); ncp_add_word(server, search_attribs); ncp_add_dword(server, RIM_ALL); ncp_add_dword(server, create_attributes); /* The desired acc rights seem to be the inherited rights mask for directories */ ncp_add_word(server, desired_acc_rights); ncp_add_handle_path(server, volnum, dirent, 1, name); if ((result = ncp_request(server, 87)) != 0) goto out; if (!(create_attributes & aDIR)) target->opened = 1; /* in target there's a new finfo to fill */ ncp_extract_file_info(ncp_reply_data(server, 6), &(target->i)); target->volume = target->i.volNumber; ConvertToNWfromDWORD(ncp_reply_le16(server, 0), ncp_reply_le16(server, 2), target->file_handle); ncp_unlock_server(server); (void)ncp_obtain_nfs_info(server, &(target->i)); return 0; out: ncp_unlock_server(server); return result; } int ncp_initialize_search(struct ncp_server *server, struct inode *dir, struct nw_search_sequence *target) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; ncp_init_request(server); ncp_add_byte(server, 2); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, 0); /* reserved */ ncp_add_handle_path(server, volnum, dirent, 1, NULL); result = ncp_request(server, 87); if (result) goto out; memcpy(target, ncp_reply_data(server, 0), sizeof(*target)); out: ncp_unlock_server(server); return result; } int ncp_search_for_fileset(struct 
ncp_server *server, struct nw_search_sequence *seq, int* more, int* cnt, char* buffer, size_t bufsize, char** rbuf, size_t* rsize) { int result; ncp_init_request(server); ncp_add_byte(server, 20); ncp_add_byte(server, server->name_space[seq->volNumber]); ncp_add_byte(server, 0); /* datastream */ ncp_add_word(server, cpu_to_le16(0x8006)); ncp_add_dword(server, RIM_ALL); ncp_add_word(server, cpu_to_le16(32767)); /* max returned items */ ncp_add_mem(server, seq, 9); #ifdef CONFIG_NCPFS_NFS_NS if (server->name_space[seq->volNumber] == NW_NS_NFS) { ncp_add_byte(server, 0); /* 0 byte pattern */ } else #endif { ncp_add_byte(server, 2); /* 2 byte pattern */ ncp_add_byte(server, 0xff); /* following is a wildcard */ ncp_add_byte(server, '*'); } result = ncp_request2(server, 87, buffer, bufsize); if (result) { ncp_unlock_server(server); return result; } if (server->ncp_reply_size < 12) { ncp_unlock_server(server); return 0xFF; } *rsize = server->ncp_reply_size - 12; ncp_unlock_server(server); buffer = buffer + sizeof(struct ncp_reply_header); *rbuf = buffer + 12; *cnt = WVAL_LH(buffer + 10); *more = BVAL(buffer + 9); memcpy(seq, buffer, 9); return 0; } static int ncp_RenameNSEntry(struct ncp_server *server, struct inode *old_dir, const char *old_name, __le16 old_type, struct inode *new_dir, const char *new_name) { int result = -EINVAL; if ((old_dir == NULL) || (old_name == NULL) || (new_dir == NULL) || (new_name == NULL)) goto out; ncp_init_request(server); ncp_add_byte(server, 4); /* subfunction */ ncp_add_byte(server, server->name_space[NCP_FINFO(old_dir)->volNumber]); ncp_add_byte(server, 1); /* rename flag */ ncp_add_word(server, old_type); /* search attributes */ /* source Handle Path */ ncp_add_byte(server, NCP_FINFO(old_dir)->volNumber); ncp_add_dword(server, NCP_FINFO(old_dir)->dirEntNum); ncp_add_byte(server, 1); ncp_add_byte(server, 1); /* 1 source component */ /* dest Handle Path */ ncp_add_byte(server, NCP_FINFO(new_dir)->volNumber); ncp_add_dword(server, 
NCP_FINFO(new_dir)->dirEntNum); ncp_add_byte(server, 1); ncp_add_byte(server, 1); /* 1 destination component */ /* source path string */ ncp_add_pstring(server, old_name); /* dest path string */ ncp_add_pstring(server, new_name); result = ncp_request(server, 87); ncp_unlock_server(server); out: return result; } int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server, struct inode *old_dir, const char *old_name, struct inode *new_dir, const char *new_name) { int result; __le16 old_type = cpu_to_le16(0x06); /* If somebody can do it atomic, call me... vandrove@vc.cvut.cz */ result = ncp_RenameNSEntry(server, old_dir, old_name, old_type, new_dir, new_name); if (result == 0xFF) /* File Not Found, try directory */ { old_type = cpu_to_le16(0x16); result = ncp_RenameNSEntry(server, old_dir, old_name, old_type, new_dir, new_name); } if (result != 0x92) return result; /* All except NO_FILES_RENAMED */ result = ncp_del_file_or_subdir(server, new_dir, new_name); if (result != 0) return -EACCES; result = ncp_RenameNSEntry(server, old_dir, old_name, old_type, new_dir, new_name); return result; } /* We have to transfer to/from user space */ int ncp_read_kernel(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_read, char *target, int *bytes_read) { const char *source; int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be16(server, to_read); if ((result = ncp_request(server, 72)) != 0) { goto out; } *bytes_read = ncp_reply_be16(server, 0); source = ncp_reply_data(server, 2 + (offset & 1)); memcpy(target, source, *bytes_read); out: ncp_unlock_server(server); return result; } /* There is a problem... egrep and some other silly tools do: x = mmap(NULL, MAP_PRIVATE, PROT_READ|PROT_WRITE, <ncpfs fd>, 32768); read(<ncpfs fd>, x, 32768); Now copying read result by copy_to_user causes pagefault. This pagefault could not be handled because of server was locked due to read. 
So we have to use temporary buffer. So ncp_unlock_server must be done before copy_to_user (and for write, copy_from_user must be done before ncp_init_request... same applies for send raw packet ioctl). Because of file is normally read in bigger chunks, caller provides kmalloced (vmalloced) chunk of memory with size >= to_read... */ int ncp_read_bounce(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_read, char __user *target, int *bytes_read, void* bounce, __u32 bufsize) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be16(server, to_read); result = ncp_request2(server, 72, bounce, bufsize); ncp_unlock_server(server); if (!result) { int len = get_unaligned_be16((char *)bounce + sizeof(struct ncp_reply_header)); result = -EIO; if (len <= to_read) { char* source; source = (char*)bounce + sizeof(struct ncp_reply_header) + 2 + (offset & 1); *bytes_read = len; result = 0; if (copy_to_user(target, source, len)) result = -EFAULT; } } return result; } int ncp_write_kernel(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_write, const char *source, int *bytes_written) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be16(server, to_write); ncp_add_mem(server, source, to_write); if ((result = ncp_request(server, 73)) == 0) *bytes_written = to_write; ncp_unlock_server(server); return result; } #ifdef CONFIG_NCPFS_IOCTL_LOCKING int ncp_LogPhysicalRecord(struct ncp_server *server, const char *file_id, __u8 locktype, __u32 offset, __u32 length, __u16 timeout) { int result; ncp_init_request(server); ncp_add_byte(server, locktype); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be32(server, length); ncp_add_be16(server, timeout); if ((result = ncp_request(server, 0x1A)) != 0) { ncp_unlock_server(server); return result; } 
ncp_unlock_server(server); return 0; } int ncp_ClearPhysicalRecord(struct ncp_server *server, const char *file_id, __u32 offset, __u32 length) { int result; ncp_init_request(server); ncp_add_byte(server, 0); /* who knows... lanalyzer says that */ ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be32(server, length); if ((result = ncp_request(server, 0x1E)) != 0) { ncp_unlock_server(server); return result; } ncp_unlock_server(server); return 0; } #endif /* CONFIG_NCPFS_IOCTL_LOCKING */ #ifdef CONFIG_NCPFS_NLS /* This are the NLS conversion routines with inspirations and code parts * from the vfat file system and hints from Petr Vandrovec. */ int ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen, const unsigned char *iname, unsigned int ilen, int cc) { struct nls_table *in = server->nls_io; struct nls_table *out = server->nls_vol; unsigned char *vname_start; unsigned char *vname_end; const unsigned char *iname_end; iname_end = iname + ilen; vname_start = vname; vname_end = vname + *vlen - 1; while (iname < iname_end) { int chl; wchar_t ec; if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) { int k; unicode_t u; k = utf8_to_utf32(iname, iname_end - iname, &u); if (k < 0 || u > MAX_WCHAR_T) return -EINVAL; iname += k; ec = u; } else { if (*iname == NCP_ESC) { int k; if (iname_end - iname < 5) goto nospec; ec = 0; for (k = 1; k < 5; k++) { unsigned char nc; nc = iname[k] - '0'; if (nc >= 10) { nc -= 'A' - '0' - 10; if ((nc < 10) || (nc > 15)) { goto nospec; } } ec = (ec << 4) | nc; } iname += 5; } else { nospec:; if ( (chl = in->char2uni(iname, iname_end - iname, &ec)) < 0) return chl; iname += chl; } } /* unitoupper should be here! */ chl = out->uni2char(ec, vname, vname_end - vname); if (chl < 0) return chl; /* this is wrong... 
*/ if (cc) { int chi; for (chi = 0; chi < chl; chi++){ vname[chi] = ncp_toupper(out, vname[chi]); } } vname += chl; } *vname = 0; *vlen = vname - vname_start; return 0; } int ncp__vol2io(struct ncp_server *server, unsigned char *iname, unsigned int *ilen, const unsigned char *vname, unsigned int vlen, int cc) { struct nls_table *in = server->nls_vol; struct nls_table *out = server->nls_io; const unsigned char *vname_end; unsigned char *iname_start; unsigned char *iname_end; unsigned char *vname_cc; int err; vname_cc = NULL; if (cc) { int i; /* this is wrong! */ vname_cc = kmalloc(vlen, GFP_KERNEL); if (!vname_cc) return -ENOMEM; for (i = 0; i < vlen; i++) vname_cc[i] = ncp_tolower(in, vname[i]); vname = vname_cc; } iname_start = iname; iname_end = iname + *ilen - 1; vname_end = vname + vlen; while (vname < vname_end) { wchar_t ec; int chl; if ( (chl = in->char2uni(vname, vname_end - vname, &ec)) < 0) { err = chl; goto quit; } vname += chl; /* unitolower should be here! */ if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) { int k; k = utf32_to_utf8(ec, iname, iname_end - iname); if (k < 0) { err = -ENAMETOOLONG; goto quit; } iname += k; } else { if ( (chl = out->uni2char(ec, iname, iname_end - iname)) >= 0) { iname += chl; } else { int k; if (iname_end - iname < 5) { err = -ENAMETOOLONG; goto quit; } *iname = NCP_ESC; for (k = 4; k > 0; k--) { unsigned char v; v = (ec & 0xF) + '0'; if (v > '9') { v += 'A' - '9' - 1; } iname[k] = v; ec >>= 4; } iname += 5; } } } *iname = 0; *ilen = iname - iname_start; err = 0; quit:; if (cc) kfree(vname_cc); return err; } #else int ncp__io2vol(unsigned char *vname, unsigned int *vlen, const unsigned char *iname, unsigned int ilen, int cc) { int i; if (*vlen <= ilen) return -ENAMETOOLONG; if (cc) for (i = 0; i < ilen; i++) { *vname = toupper(*iname); vname++; iname++; } else { memmove(vname, iname, ilen); vname += ilen; } *vlen = ilen; *vname = 0; return 0; } int ncp__vol2io(unsigned char *iname, unsigned int *ilen, const unsigned char *vname, 
unsigned int vlen, int cc) { int i; if (*ilen <= vlen) return -ENAMETOOLONG; if (cc) for (i = 0; i < vlen; i++) { *iname = tolower(*vname); iname++; vname++; } else { memmove(iname, vname, vlen); iname += vlen; } *ilen = vlen; *iname = 0; return 0; } #endif
gpl-2.0
sleshepic/G920T_OI1_kernel
arch/sh/mm/cache-sh2.c
10997
2289
/* * arch/sh/mm/cache-sh2.c * * Copyright (C) 2002 Paul Mundt * Copyright (C) 2008 Yoshinori Sato * * Released under the terms of the GNU GPL v2.0. */ #include <linux/init.h> #include <linux/mm.h> #include <asm/cache.h> #include <asm/addrspace.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/io.h> static void sh2__flush_wback_region(void *start, int size) { unsigned long v; unsigned long begin, end; begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); end = ((unsigned long)start + size + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); for (v = begin; v < end; v+=L1_CACHE_BYTES) { unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0); int way; for (way = 0; way < 4; way++) { unsigned long data = __raw_readl(addr | (way << 12)); if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { data &= ~SH_CACHE_UPDATED; __raw_writel(data, addr | (way << 12)); } } } } static void sh2__flush_purge_region(void *start, int size) { unsigned long v; unsigned long begin, end; begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); end = ((unsigned long)start + size + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); for (v = begin; v < end; v+=L1_CACHE_BYTES) __raw_writel((v & CACHE_PHYSADDR_MASK), CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); } static void sh2__flush_invalidate_region(void *start, int size) { #ifdef CONFIG_CACHE_WRITEBACK /* * SH-2 does not support individual line invalidation, only a * global invalidate. 
*/ unsigned long ccr; unsigned long flags; local_irq_save(flags); jump_to_uncached(); ccr = __raw_readl(CCR); ccr |= CCR_CACHE_INVALIDATE; __raw_writel(ccr, CCR); back_to_cached(); local_irq_restore(flags); #else unsigned long v; unsigned long begin, end; begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); end = ((unsigned long)start + size + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); for (v = begin; v < end; v+=L1_CACHE_BYTES) __raw_writel((v & CACHE_PHYSADDR_MASK), CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); #endif } void __init sh2_cache_init(void) { __flush_wback_region = sh2__flush_wback_region; __flush_purge_region = sh2__flush_purge_region; __flush_invalidate_region = sh2__flush_invalidate_region; }
gpl-2.0
dhiru1602/android_kernel_samsung_jf
crypto/michael_mic.c
12277
3701
/* * Cryptographic API * * Michael MIC (IEEE 802.11i/TKIP) keyed digest * * Copyright (c) 2004 Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <crypto/internal/hash.h> #include <asm/byteorder.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> struct michael_mic_ctx { u32 l, r; }; struct michael_mic_desc_ctx { u8 pending[4]; size_t pending_len; u32 l, r; }; static inline u32 xswap(u32 val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); } #define michael_block(l, r) \ do { \ r ^= rol32(l, 17); \ l += r; \ r ^= xswap(l); \ l += r; \ r ^= rol32(l, 3); \ l += r; \ r ^= ror32(l, 2); \ l += r; \ } while (0) static int michael_init(struct shash_desc *desc) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm); mctx->pending_len = 0; mctx->l = ctx->l; mctx->r = ctx->r; return 0; } static int michael_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); const __le32 *src; if (mctx->pending_len) { int flen = 4 - mctx->pending_len; if (flen > len) flen = len; memcpy(&mctx->pending[mctx->pending_len], data, flen); mctx->pending_len += flen; data += flen; len -= flen; if (mctx->pending_len < 4) return 0; src = (const __le32 *)mctx->pending; mctx->l ^= le32_to_cpup(src); michael_block(mctx->l, mctx->r); mctx->pending_len = 0; } src = (const __le32 *)data; while (len >= 4) { mctx->l ^= le32_to_cpup(src++); michael_block(mctx->l, mctx->r); len -= 4; } if (len > 0) { mctx->pending_len = len; memcpy(mctx->pending, src, len); } return 0; } static int michael_final(struct shash_desc *desc, u8 *out) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); u8 *data = mctx->pending; __le32 *dst = (__le32 *)out; 
/* Last block and padding (0x5a, 4..7 x 0) */ switch (mctx->pending_len) { case 0: mctx->l ^= 0x5a; break; case 1: mctx->l ^= data[0] | 0x5a00; break; case 2: mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000; break; case 3: mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) | 0x5a000000; break; } michael_block(mctx->l, mctx->r); /* l ^= 0; */ michael_block(mctx->l, mctx->r); dst[0] = cpu_to_le32(mctx->l); dst[1] = cpu_to_le32(mctx->r); return 0; } static int michael_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm); const __le32 *data = (const __le32 *)key; if (keylen != 8) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } mctx->l = le32_to_cpu(data[0]); mctx->r = le32_to_cpu(data[1]); return 0; } static struct shash_alg alg = { .digestsize = 8, .setkey = michael_setkey, .init = michael_init, .update = michael_update, .final = michael_final, .descsize = sizeof(struct michael_mic_desc_ctx), .base = { .cra_name = "michael_mic", .cra_blocksize = 8, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct michael_mic_ctx), .cra_module = THIS_MODULE, } }; static int __init michael_mic_init(void) { return crypto_register_shash(&alg); } static void __exit michael_mic_exit(void) { crypto_unregister_shash(&alg); } module_init(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Michael MIC"); MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
gpl-2.0
invisiblek/android_kernel_samsung_jaspervzw
drivers/media/video/bt8xx/bttv-audio-hook.c
13813
9617
/* * Handlers for board audio hooks, splitted from bttv-cards * * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org) * This code is placed under the terms of the GNU General Public License */ #include "bttv-audio-hook.h" #include <linux/delay.h> /* ----------------------------------------------------------------------- */ /* winview */ void winview_volume(struct bttv *btv, __u16 volume) { /* PT2254A programming Jon Tombs, jon@gte.esi.us.es */ int bits_out, loops, vol, data; /* 32 levels logarithmic */ vol = 32 - ((volume>>11)); /* units */ bits_out = (PT2254_DBS_IN_2>>(vol%5)); /* tens */ bits_out |= (PT2254_DBS_IN_10>>(vol/5)); bits_out |= PT2254_L_CHANNEL | PT2254_R_CHANNEL; data = gpio_read(); data &= ~(WINVIEW_PT2254_CLK| WINVIEW_PT2254_DATA| WINVIEW_PT2254_STROBE); for (loops = 17; loops >= 0 ; loops--) { if (bits_out & (1<<loops)) data |= WINVIEW_PT2254_DATA; else data &= ~WINVIEW_PT2254_DATA; gpio_write(data); udelay(5); data |= WINVIEW_PT2254_CLK; gpio_write(data); udelay(5); data &= ~WINVIEW_PT2254_CLK; gpio_write(data); } data |= WINVIEW_PT2254_STROBE; data &= ~WINVIEW_PT2254_DATA; gpio_write(data); udelay(10); data &= ~WINVIEW_PT2254_STROBE; gpio_write(data); } /* ----------------------------------------------------------------------- */ /* mono/stereo control for various cards (which don't use i2c chips but */ /* connect something to the GPIO pins */ void gvbctv3pci_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned int con = 0; if (set) { gpio_inout(0x300, 0x300); if (t->audmode & V4L2_TUNER_MODE_LANG1) con = 0x000; if (t->audmode & V4L2_TUNER_MODE_LANG2) con = 0x300; if (t->audmode & V4L2_TUNER_MODE_STEREO) con = 0x200; /* if (t->audmode & V4L2_TUNER_MODE_MONO) * con = 0x100; */ gpio_bits(0x300, con); } else { t->audmode = V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } void gvbctv5pci_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned int val, con; if (btv->radio_user) return; 
val = gpio_read(); if (set) { con = 0x000; if (t->audmode & V4L2_TUNER_MODE_LANG2) { if (t->audmode & V4L2_TUNER_MODE_LANG1) { /* LANG1 + LANG2 */ con = 0x100; } else { /* LANG2 */ con = 0x300; } } if (con != (val & 0x300)) { gpio_bits(0x300, con); if (bttv_gpio) bttv_gpio_tracking(btv,"gvbctv5pci"); } } else { switch (val & 0x70) { case 0x10: t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case 0x30: t->rxsubchans = V4L2_TUNER_SUB_LANG2; break; case 0x50: t->rxsubchans = V4L2_TUNER_SUB_LANG1; break; case 0x60: t->rxsubchans = V4L2_TUNER_SUB_STEREO; break; case 0x70: t->rxsubchans = V4L2_TUNER_SUB_MONO; break; default: t->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; } t->audmode = V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } /* * Mario Medina Nussbaum <medisoft@alohabbs.org.mx> * I discover that on BT848_GPIO_DATA address a byte 0xcce enable stereo, * 0xdde enables mono and 0xccd enables sap * * Petr Vandrovec <VANDROVE@vc.cvut.cz> * P.S.: At least mask in line above is wrong - GPIO pins 3,2 select * input/output sound connection, so both must be set for output mode. 
* * Looks like it's needed only for the "tvphone", the "tvphone 98" * handles this with a tda9840 * */ void avermedia_tvphone_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { int val = 0; if (set) { if (t->audmode & V4L2_TUNER_MODE_LANG2) /* SAP */ val = 0x02; if (t->audmode & V4L2_TUNER_MODE_STEREO) val = 0x01; if (val) { gpio_bits(0x03,val); if (bttv_gpio) bttv_gpio_tracking(btv,"avermedia"); } } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1; return; } } void avermedia_tv_stereo_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { int val = 0; if (set) { if (t->audmode & V4L2_TUNER_MODE_LANG2) /* SAP */ val = 0x01; if (t->audmode & V4L2_TUNER_MODE_STEREO) /* STEREO */ val = 0x02; btaor(val, ~0x03, BT848_GPIO_DATA); if (bttv_gpio) bttv_gpio_tracking(btv,"avermedia"); } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; return; } } /* Lifetec 9415 handling */ void lt9415_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { int val = 0; if (gpio_read() & 0x4000) { t->audmode = V4L2_TUNER_MODE_MONO; return; } if (set) { if (t->audmode & V4L2_TUNER_MODE_LANG2) /* A2 SAP */ val = 0x0080; if (t->audmode & V4L2_TUNER_MODE_STEREO) /* A2 stereo */ val = 0x0880; if ((t->audmode & V4L2_TUNER_MODE_LANG1) || (t->audmode & V4L2_TUNER_MODE_MONO)) val = 0; gpio_bits(0x0880, val); if (bttv_gpio) bttv_gpio_tracking(btv,"lt9415"); } else { /* autodetect doesn't work with this card :-( */ t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; return; } } /* TDA9821 on TerraTV+ Bt848, Bt878 */ void terratv_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned int con = 0; if (set) { gpio_inout(0x180000,0x180000); if (t->audmode & V4L2_TUNER_MODE_LANG2) con = 0x080000; if (t->audmode & V4L2_TUNER_MODE_STEREO) con = 0x180000; gpio_bits(0x180000, con); if (bttv_gpio) bttv_gpio_tracking(btv,"terratv"); 
} else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } void winfast2000_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned long val = 0; if (set) { /*btor (0xc32000, BT848_GPIO_OUT_EN);*/ if (t->audmode & V4L2_TUNER_MODE_MONO) /* Mono */ val = 0x420000; if (t->audmode & V4L2_TUNER_MODE_LANG1) /* Mono */ val = 0x420000; if (t->audmode & V4L2_TUNER_MODE_LANG2) /* SAP */ val = 0x410000; if (t->audmode & V4L2_TUNER_MODE_STEREO) /* Stereo */ val = 0x020000; if (val) { gpio_bits(0x430000, val); if (bttv_gpio) bttv_gpio_tracking(btv,"winfast2000"); } } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } /* * Dariusz Kowalewski <darekk@automex.pl> * sound control for Prolink PV-BT878P+9B (PixelView PlayTV Pro FM+NICAM * revision 9B has on-board TDA9874A sound decoder). * * Note: There are card variants without tda9874a. Forcing the "stereo sound route" * will mute this cards. */ void pvbt878p9b_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned int val = 0; if (btv->radio_user) return; if (set) { if (t->audmode & V4L2_TUNER_MODE_MONO) { val = 0x01; } if ((t->audmode & (V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2)) || (t->audmode & V4L2_TUNER_MODE_STEREO)) { val = 0x02; } if (val) { gpio_bits(0x03,val); if (bttv_gpio) bttv_gpio_tracking(btv,"pvbt878p9b"); } } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } /* * Dariusz Kowalewski <darekk@automex.pl> * sound control for FlyVideo 2000S (with tda9874 decoder) * based on pvbt878p9b_audio() - this is not tested, please fix!!! 
*/ void fv2000s_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned int val = 0xffff; if (btv->radio_user) return; if (set) { if (t->audmode & V4L2_TUNER_MODE_MONO) { val = 0x0000; } if ((t->audmode & (V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2)) || (t->audmode & V4L2_TUNER_MODE_STEREO)) { val = 0x1080; /*-dk-???: 0x0880, 0x0080, 0x1800 ... */ } if (val != 0xffff) { gpio_bits(0x1800, val); if (bttv_gpio) bttv_gpio_tracking(btv,"fv2000s"); } } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } /* * sound control for Canopus WinDVR PCI * Masaki Suzuki <masaki@btree.org> */ void windvr_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned long val = 0; if (set) { if (t->audmode & V4L2_TUNER_MODE_MONO) val = 0x040000; if (t->audmode & V4L2_TUNER_MODE_LANG1) val = 0; if (t->audmode & V4L2_TUNER_MODE_LANG2) val = 0x100000; if (t->audmode & V4L2_TUNER_MODE_STEREO) val = 0; if (val) { gpio_bits(0x140000, val); if (bttv_gpio) bttv_gpio_tracking(btv,"windvr"); } } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } } /* * sound control for AD-TVK503 * Hiroshi Takekawa <sian@big.or.jp> */ void adtvk503_audio(struct bttv *btv, struct v4l2_tuner *t, int set) { unsigned int con = 0xffffff; /* btaor(0x1e0000, ~0x1e0000, BT848_GPIO_OUT_EN); */ if (set) { /* btor(***, BT848_GPIO_OUT_EN); */ if (t->audmode & V4L2_TUNER_MODE_LANG1) con = 0x00000000; if (t->audmode & V4L2_TUNER_MODE_LANG2) con = 0x00180000; if (t->audmode & V4L2_TUNER_MODE_STEREO) con = 0x00000000; if (t->audmode & V4L2_TUNER_MODE_MONO) con = 0x00060000; if (con != 0xffffff) { gpio_bits(0x1e0000,con); if (bttv_gpio) bttv_gpio_tracking(btv, "adtvk503"); } } else { t->audmode = V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO | V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } }
gpl-2.0
Naios/MistCore
dep/StormLib/src/libtomcrypt/src/pk/asn1/der_length_utctime.c
246
1192
/* LibTomCrypt, modular cryptographic library -- Tom St Denis * * LibTomCrypt is a library that provides various cryptographic * algorithms in a highly modular and flexible manner. * * The library is free for all purposes without any express * guarantee it works. * * Tom St Denis, tomstdenis@gmail.com, http://libtom.org */ #include "../../headers/tomcrypt.h" /** @file der_length_utctime.c ASN.1 DER, get length of UTCTIME, Tom St Denis */ #ifdef LTC_DER /** Gets length of DER encoding of UTCTIME @param utctime The UTC time structure to get the size of @param outlen [out] The length of the DER encoding @return CRYPT_OK if successful */ int der_length_utctime(ltc_utctime *utctime, unsigned long *outlen) { LTC_ARGCHK(outlen != NULL); LTC_ARGCHK(utctime != NULL); if (utctime->off_hh == 0 && utctime->off_mm == 0) { /* we encode as YYMMDDhhmmssZ */ *outlen = 2 + 13; } else { /* we encode as YYMMDDhhmmss{+|-}hh'mm' */ *outlen = 2 + 17; } return CRYPT_OK; } #endif /* $Source: /cvs/libtom/libtomcrypt/src/pk/asn1/der/utctime/der_length_utctime.c,v $ */ /* $Revision: 1.5 $ */ /* $Date: 2006/12/28 01:27:24 $ */
gpl-2.0
ljtale/linux-3.14.40
arch/powerpc/mm/tlb_nohash.c
246
18126
/* * This file contains the routines for TLB flushing. * On machines where the MMU does not use a hash table to store virtual to * physical translations (ie, SW loaded TLBs or Book3E compilant processors, * this does -not- include 603 however which shares the implementation with * hash based processors) * * -- BenH * * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org> * IBM Corp. * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/preempt.h> #include <linux/spinlock.h> #include <linux/memblock.h> #include <linux/of_fdt.h> #include <linux/hugetlb.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/code-patching.h> #include <asm/hugetlb.h> #include <asm/paca.h> #include "mmu_decl.h" /* * This struct lists the sw-supported page sizes. The hardawre MMU may support * other sizes not listed here. The .ind field is only used on MMUs that have * indirect page table entries. 
*/ #ifdef CONFIG_PPC_BOOK3E_MMU #ifdef CONFIG_PPC_FSL_BOOK3E struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, .enc = BOOK3E_PAGESZ_4K, }, [MMU_PAGE_2M] = { .shift = 21, .enc = BOOK3E_PAGESZ_2M, }, [MMU_PAGE_4M] = { .shift = 22, .enc = BOOK3E_PAGESZ_4M, }, [MMU_PAGE_16M] = { .shift = 24, .enc = BOOK3E_PAGESZ_16M, }, [MMU_PAGE_64M] = { .shift = 26, .enc = BOOK3E_PAGESZ_64M, }, [MMU_PAGE_256M] = { .shift = 28, .enc = BOOK3E_PAGESZ_256M, }, [MMU_PAGE_1G] = { .shift = 30, .enc = BOOK3E_PAGESZ_1GB, }, }; #else struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, .ind = 20, .enc = BOOK3E_PAGESZ_4K, }, [MMU_PAGE_16K] = { .shift = 14, .enc = BOOK3E_PAGESZ_16K, }, [MMU_PAGE_64K] = { .shift = 16, .ind = 28, .enc = BOOK3E_PAGESZ_64K, }, [MMU_PAGE_1M] = { .shift = 20, .enc = BOOK3E_PAGESZ_1M, }, [MMU_PAGE_16M] = { .shift = 24, .ind = 36, .enc = BOOK3E_PAGESZ_16M, }, [MMU_PAGE_256M] = { .shift = 28, .enc = BOOK3E_PAGESZ_256M, }, [MMU_PAGE_1G] = { .shift = 30, .enc = BOOK3E_PAGESZ_1GB, }, }; #endif /* CONFIG_FSL_BOOKE */ static inline int mmu_get_tsize(int psize) { return mmu_psize_defs[psize].enc; } #else static inline int mmu_get_tsize(int psize) { /* This isn't used on !Book3E for now */ return 0; } #endif /* CONFIG_PPC_BOOK3E_MMU */ /* The variables below are currently only used on 64-bit Book3E * though this will probably be made common with other nohash * implementations at some point */ #ifdef CONFIG_PPC64 int mmu_linear_psize; /* Page size used for the linear mapping */ int mmu_pte_psize; /* Page size used for PTE pages */ int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ int book3e_htw_mode; /* HW tablewalk? 
Value is PPC_HTW_* */ unsigned long linear_map_top; /* Top of linear mapping */ #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC_FSL_BOOK3E /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx); #endif /* * Base TLB flushing operations: * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes kernel pages * * - local_* variants of page and mm only apply to the current * processor */ /* * These are the base non-SMP variants of page and mm flushing */ void local_flush_tlb_mm(struct mm_struct *mm) { unsigned int pid; preempt_disable(); pid = mm->context.id; if (pid != MMU_NO_CONTEXT) _tlbil_pid(pid); preempt_enable(); } EXPORT_SYMBOL(local_flush_tlb_mm); void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, int tsize, int ind) { unsigned int pid; preempt_disable(); pid = mm ? mm->context.id : 0; if (pid != MMU_NO_CONTEXT) _tlbil_va(vmaddr, pid, tsize, ind); preempt_enable(); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, mmu_get_tsize(mmu_virtual_psize), 0); } EXPORT_SYMBOL(local_flush_tlb_page); /* * And here are the SMP non-local implementations */ #ifdef CONFIG_SMP static DEFINE_RAW_SPINLOCK(tlbivax_lock); static int mm_is_core_local(struct mm_struct *mm) { return cpumask_subset(mm_cpumask(mm), topology_thread_cpumask(smp_processor_id())); } struct tlb_flush_param { unsigned long addr; unsigned int pid; unsigned int tsize; unsigned int ind; }; static void do_flush_tlb_mm_ipi(void *param) { struct tlb_flush_param *p = param; _tlbil_pid(p ? 
p->pid : 0); } static void do_flush_tlb_page_ipi(void *param) { struct tlb_flush_param *p = param; _tlbil_va(p->addr, p->pid, p->tsize, p->ind); } /* Note on invalidations and PID: * * We snapshot the PID with preempt disabled. At this point, it can still * change either because: * - our context is being stolen (PID -> NO_CONTEXT) on another CPU * - we are invaliating some target that isn't currently running here * and is concurrently acquiring a new PID on another CPU * - some other CPU is re-acquiring a lost PID for this mm * etc... * * However, this shouldn't be a problem as we only guarantee * invalidation of TLB entries present prior to this call, so we * don't care about the PID changing, and invalidating a stale PID * is generally harmless. */ void flush_tlb_mm(struct mm_struct *mm) { unsigned int pid; preempt_disable(); pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) goto no_context; if (!mm_is_core_local(mm)) { struct tlb_flush_param p = { .pid = pid }; /* Ignores smp_processor_id() even if set. */ smp_call_function_many(mm_cpumask(mm), do_flush_tlb_mm_ipi, &p, 1); } _tlbil_pid(pid); no_context: preempt_enable(); } EXPORT_SYMBOL(flush_tlb_mm); void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, int tsize, int ind) { struct cpumask *cpu_mask; unsigned int pid; preempt_disable(); pid = mm ? 
mm->context.id : 0; if (unlikely(pid == MMU_NO_CONTEXT)) goto bail; cpu_mask = mm_cpumask(mm); if (!mm_is_core_local(mm)) { /* If broadcast tlbivax is supported, use it */ if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) { int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL); if (lock) raw_spin_lock(&tlbivax_lock); _tlbivax_bcast(vmaddr, pid, tsize, ind); if (lock) raw_spin_unlock(&tlbivax_lock); goto bail; } else { struct tlb_flush_param p = { .pid = pid, .addr = vmaddr, .tsize = tsize, .ind = ind, }; /* Ignores smp_processor_id() even if set in cpu_mask */ smp_call_function_many(cpu_mask, do_flush_tlb_page_ipi, &p, 1); } } _tlbil_va(vmaddr, pid, tsize, ind); bail: preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { #ifdef CONFIG_HUGETLB_PAGE if (vma && is_vm_hugetlb_page(vma)) flush_hugetlb_page(vma, vmaddr); #endif __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, mmu_get_tsize(mmu_virtual_psize), 0); } EXPORT_SYMBOL(flush_tlb_page); #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC_47x void __init early_init_mmu_47x(void) { #ifdef CONFIG_SMP unsigned long root = of_get_flat_dt_root(); if (of_get_flat_dt_prop(root, "cooperative-partition", NULL)) mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST); #endif /* CONFIG_SMP */ } #endif /* CONFIG_PPC_47x */ /* * Flush kernel TLB entries in the given range */ void flush_tlb_kernel_range(unsigned long start, unsigned long end) { #ifdef CONFIG_SMP preempt_disable(); smp_call_function(do_flush_tlb_mm_ipi, NULL, 1); _tlbil_pid(0); preempt_enable(); #else _tlbil_pid(0); #endif } EXPORT_SYMBOL(flush_tlb_kernel_range); /* * Currently, for range flushing, we just do a full mm flush. 
This should * be optimized based on a threshold on the size of the range, since * some implementation can stack multiple tlbivax before a tlbsync but * for now, we keep it that way */ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } EXPORT_SYMBOL(flush_tlb_range); void tlb_flush(struct mmu_gather *tlb) { flush_tlb_mm(tlb->mm); } /* * Below are functions specific to the 64-bit variant of Book3E though that * may change in the future */ #ifdef CONFIG_PPC64 /* * Handling of virtual linear page tables or indirect TLB entries * flushing when PTE pages are freed */ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) { int tsize = mmu_psize_defs[mmu_pte_psize].enc; if (book3e_htw_mode != PPC_HTW_NONE) { unsigned long start = address & PMD_MASK; unsigned long end = address + PMD_SIZE; unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; /* This isn't the most optimal, ideally we would factor out the * while preempt & CPU mask mucking around, or even the IPI but * it will do for now */ while (start < end) { __flush_tlb_page(tlb->mm, start, tsize, 1); start += size; } } else { unsigned long rmask = 0xf000000000000000ul; unsigned long rid = (address & rmask) | 0x1000000000000000ul; unsigned long vpte = address & ~rmask; #ifdef CONFIG_PPC_64K_PAGES vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful; #else vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; #endif vpte |= rid; __flush_tlb_page(tlb->mm, vpte, tsize, 0); } } static void setup_page_sizes(void) { unsigned int tlb0cfg; unsigned int tlb0ps; unsigned int eptcfg; int i, psize; #ifdef CONFIG_PPC_FSL_BOOK3E unsigned int mmucfg = mfspr(SPRN_MMUCFG); int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); unsigned int min_pg, max_pg; min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> 
TLBnCFG_MAXSIZE_SHIFT; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def; unsigned int shift; def = &mmu_psize_defs[psize]; shift = def->shift; if (shift == 0 || shift & 1) continue; /* adjust to be in terms of 4^shift Kb */ shift = (shift - 10) >> 1; if ((shift >= min_pg) && (shift <= max_pg)) def->flags |= MMU_PAGE_SIZE_DIRECT; } goto out; } if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { u32 tlb1cfg, tlb1ps; tlb0cfg = mfspr(SPRN_TLB0CFG); tlb1cfg = mfspr(SPRN_TLB1CFG); tlb1ps = mfspr(SPRN_TLB1PS); eptcfg = mfspr(SPRN_EPTCFG); if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) book3e_htw_mode = PPC_HTW_E6500; /* * We expect 4K subpage size and unrestricted indirect size. * The lack of a restriction on indirect size is a Freescale * extension, indicated by PSn = 0 but SPSn != 0. */ if (eptcfg != 2) book3e_htw_mode = PPC_HTW_NONE; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (tlb1ps & (1U << (def->shift - 10))) { def->flags |= MMU_PAGE_SIZE_DIRECT; if (book3e_htw_mode && psize == MMU_PAGE_2M) def->flags |= MMU_PAGE_SIZE_INDIRECT; } } goto out; } #endif tlb0cfg = mfspr(SPRN_TLB0CFG); tlb0ps = mfspr(SPRN_TLB0PS); eptcfg = mfspr(SPRN_EPTCFG); /* Look for supported direct sizes */ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (tlb0ps & (1U << (def->shift - 10))) def->flags |= MMU_PAGE_SIZE_DIRECT; } /* Indirect page sizes supported ? */ if ((tlb0cfg & TLBnCFG_IND) == 0 || (tlb0cfg & TLBnCFG_PT) == 0) goto out; book3e_htw_mode = PPC_HTW_IBM; /* Now, we only deal with one IND page size for each * direct size. Hopefully all implementations today are * unambiguous, but we might want to be careful in the * future. 
*/ for (i = 0; i < 3; i++) { unsigned int ps, sps; sps = eptcfg & 0x1f; eptcfg >>= 5; ps = eptcfg & 0x1f; eptcfg >>= 5; if (!ps || !sps) continue; for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (ps == (def->shift - 10)) def->flags |= MMU_PAGE_SIZE_INDIRECT; if (sps == (def->shift - 10)) def->ind = ps + 10; } } out: /* Cleanup array and print summary */ pr_info("MMU: Supported page sizes\n"); for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; const char *__page_type_names[] = { "unsupported", "direct", "indirect", "direct & indirect" }; if (def->flags == 0) { def->shift = 0; continue; } pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), __page_type_names[def->flags & 0x3]); } } static void setup_mmu_htw(void) { /* * If we want to use HW tablewalk, enable it by patching the TLB miss * handlers to branch to the one dedicated to it. */ switch (book3e_htw_mode) { case PPC_HTW_IBM: patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); break; #ifdef CONFIG_PPC_FSL_BOOK3E case PPC_HTW_E6500: patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); break; #endif } pr_info("MMU: Book3E HW tablewalk %s\n", book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); } /* * Early initialization of the MMU TLB code */ static void __early_init_mmu(int boot_cpu) { unsigned int mas4; /* XXX This will have to be decided at runtime, but right * now our boot and TLB miss code hard wires it. 
Ideally * we should find out a suitable page size and patch the * TLB miss code (either that or use the PACA to store * the value we want) */ mmu_linear_psize = MMU_PAGE_1G; /* XXX This should be decided at runtime based on supported * page sizes in the TLB, but for now let's assume 16M is * always there and a good fit (which it probably is) */ mmu_vmemmap_psize = MMU_PAGE_16M; /* XXX This code only checks for TLB 0 capabilities and doesn't * check what page size combos are supported by the HW. It * also doesn't handle the case where a separate array holds * the IND entries from the array loaded by the PT. */ if (boot_cpu) { /* Look for supported page sizes */ setup_page_sizes(); /* Look for HW tablewalk support */ setup_mmu_htw(); } /* Set MAS4 based on page table setting */ mas4 = 0x4 << MAS4_WIMGED_SHIFT; switch (book3e_htw_mode) { case PPC_HTW_E6500: mas4 |= MAS4_INDD; mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; mas4 |= MAS4_TLBSELD(1); mmu_pte_psize = MMU_PAGE_2M; break; case PPC_HTW_IBM: mas4 |= MAS4_INDD; #ifdef CONFIG_PPC_64K_PAGES mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT; mmu_pte_psize = MMU_PAGE_256M; #else mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; mmu_pte_psize = MMU_PAGE_1M; #endif break; case PPC_HTW_NONE: #ifdef CONFIG_PPC_64K_PAGES mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT; #else mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; #endif mmu_pte_psize = mmu_virtual_psize; break; } mtspr(SPRN_MAS4, mas4); /* Set the global containing the top of the linear mapping * for use by the TLB miss code */ linear_map_top = memblock_end_of_DRAM(); #ifdef CONFIG_PPC_FSL_BOOK3E if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned int num_cams; /* use a quarter of the TLBCAM for bolted linear map */ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; linear_map_top = map_mem_in_cams(linear_map_top, num_cams); /* limit memory so we dont have linear faults */ memblock_enforce_memory_limit(linear_map_top); if (book3e_htw_mode == PPC_HTW_NONE) { 
patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); } } #endif /* A sync won't hurt us after mucking around with * the MMU configuration */ mb(); memblock_set_current_limit(linear_map_top); } void __init early_init_mmu(void) { __early_init_mmu(1); } void early_init_mmu_secondary(void) { __early_init_mmu(0); } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* On non-FSL Embedded 64-bit, we adjust the RMA size to match * the bolted TLB entry. We know for now that only 1G * entries are supported though that may eventually * change. * * on FSL Embedded 64-bit, we adjust the RMA size to match the * first bolted TLB entry size. We still limit max to 1G even if * the TLB could cover more. This is due to what the early init * code is setup to do. * * We crop it to the size of the first MEMBLOCK to * avoid going over total available memory just in case... */ #ifdef CONFIG_PPC_FSL_BOOK3E if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned long linear_sz; linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET, first_memblock_base); ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); } else #endif ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); /* Finally limit subsequent allocations */ memblock_set_current_limit(first_memblock_base + ppc64_rma_size); } #else /* ! CONFIG_PPC64 */ void __init early_init_mmu(void) { #ifdef CONFIG_PPC_47x early_init_mmu_47x(); #endif } #endif /* CONFIG_PPC64 */
gpl-2.0
stratosk/semaphore
drivers/serial/atmel_serial.c
758
45275
/* * linux/drivers/char/atmel_serial.c * * Driver for Atmel AT91 / AT32 Serial ports * Copyright (C) 2003 Rick Bronson * * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * DMA support added by Chip Coldwell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/clk.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/tty_flip.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/atmel_pdc.h> #include <linux/atmel_serial.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/ioctls.h> #include <asm/mach/serial_at91.h> #include <mach/board.h> #ifdef CONFIG_ARM #include <mach/cpu.h> #include <mach/gpio.h> #endif #define PDC_BUFFER_SIZE 512 /* Revisit: We should calculate this based on the actual port settings */ #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */ #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> static void atmel_start_rx(struct uart_port *port); static void atmel_stop_rx(struct uart_port *port); #ifdef CONFIG_SERIAL_ATMEL_TTYAT /* 
Use device name ttyAT, major 204 and minor 154-169. This is necessary if we * should coexist with the 8250 driver, such as if we have an external 16C550 * UART. */ #define SERIAL_ATMEL_MAJOR 204 #define MINOR_START 154 #define ATMEL_DEVICENAME "ttyAT" #else /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port * name, but it is legally reserved for the 8250 driver. */ #define SERIAL_ATMEL_MAJOR TTY_MAJOR #define MINOR_START 64 #define ATMEL_DEVICENAME "ttyS" #endif #define ATMEL_ISR_PASS_LIMIT 256 /* UART registers. CR is write-only, hence no GET macro */ #define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR) #define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR) #define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR) #define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER) #define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR) #define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR) #define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR) #define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR) #define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR) #define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR) #define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR) #define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR) #define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR) /* PDC registers */ #define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR) #define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR) #define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR) #define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR) #define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR) #define UART_PUT_RNPR(port,v) 
__raw_writel(v, (port)->membase + ATMEL_PDC_RNPR) #define UART_PUT_RNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNCR) #define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR) #define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR) #define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR) static int (*atmel_open_hook)(struct uart_port *); static void (*atmel_close_hook)(struct uart_port *); struct atmel_dma_buffer { unsigned char *buf; dma_addr_t dma_addr; unsigned int dma_size; unsigned int ofs; }; struct atmel_uart_char { u16 status; u16 ch; }; #define ATMEL_SERIAL_RINGSIZE 1024 /* * We wrap our port structure around the generic uart_port. */ struct atmel_uart_port { struct uart_port uart; /* uart */ struct clk *clk; /* uart clock */ int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */ u32 backup_imr; /* IMR saved during suspend */ int break_active; /* break being received */ short use_dma_rx; /* enable PDC receiver */ short pdc_rx_idx; /* current PDC RX buffer */ struct atmel_dma_buffer pdc_rx[2]; /* PDC receier */ short use_dma_tx; /* enable PDC transmitter */ struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ struct tasklet_struct tasklet; unsigned int irq_status; unsigned int irq_status_prev; struct circ_buf rx_ring; struct serial_rs485 rs485; /* rs485 settings */ unsigned int tx_done_mask; }; static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; #ifdef SUPPORT_SYSRQ static struct console atmel_console; #endif static inline struct atmel_uart_port * to_atmel_uart_port(struct uart_port *uart) { return container_of(uart, struct atmel_uart_port, uart); } #ifdef CONFIG_SERIAL_ATMEL_PDC static bool atmel_use_dma_rx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); return atmel_port->use_dma_rx; } static bool atmel_use_dma_tx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 
return atmel_port->use_dma_tx; } #else static bool atmel_use_dma_rx(struct uart_port *port) { return false; } static bool atmel_use_dma_tx(struct uart_port *port) { return false; } #endif /* Enable or disable the rs485 support */ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int mode; spin_lock(&port->lock); /* Disable interrupts */ UART_PUT_IDR(port, atmel_port->tx_done_mask); mode = UART_GET_MR(port); /* Resetting serial mode to RS232 (0x0) */ mode &= ~ATMEL_US_USMODE; atmel_port->rs485 = *rs485conf; if (rs485conf->flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; UART_PUT_TTGR(port, rs485conf->delay_rts_before_send); mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); if (atmel_use_dma_tx(port)) atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; else atmel_port->tx_done_mask = ATMEL_US_TXRDY; } UART_PUT_MR(port, mode); /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); spin_unlock(&port->lock); } /* * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. */ static u_int atmel_tx_empty(struct uart_port *port) { return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0; } /* * Set state of the modem control output lines */ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) { unsigned int control = 0; unsigned int mode; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); #ifdef CONFIG_ARCH_AT91RM9200 if (cpu_is_at91rm9200()) { /* * AT91RM9200 Errata #39: RTS0 is not internally connected * to PA21. We need to drive the pin manually. 
*/ if (port->mapbase == AT91RM9200_BASE_US0) { if (mctrl & TIOCM_RTS) at91_set_gpio_value(AT91_PIN_PA21, 0); else at91_set_gpio_value(AT91_PIN_PA21, 1); } } #endif if (mctrl & TIOCM_RTS) control |= ATMEL_US_RTSEN; else control |= ATMEL_US_RTSDIS; if (mctrl & TIOCM_DTR) control |= ATMEL_US_DTREN; else control |= ATMEL_US_DTRDIS; UART_PUT_CR(port, control); /* Local loopback mode? */ mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE; if (mctrl & TIOCM_LOOP) mode |= ATMEL_US_CHMODE_LOC_LOOP; else mode |= ATMEL_US_CHMODE_NORMAL; /* Resetting serial mode to RS232 (0x0) */ mode &= ~ATMEL_US_USMODE; if (atmel_port->rs485.flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send); mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); } UART_PUT_MR(port, mode); } /* * Get state of the modem control input lines */ static u_int atmel_get_mctrl(struct uart_port *port) { unsigned int status, ret = 0; status = UART_GET_CSR(port); /* * The control signals are active low. */ if (!(status & ATMEL_US_DCD)) ret |= TIOCM_CD; if (!(status & ATMEL_US_CTS)) ret |= TIOCM_CTS; if (!(status & ATMEL_US_DSR)) ret |= TIOCM_DSR; if (!(status & ATMEL_US_RI)) ret |= TIOCM_RI; return ret; } /* * Stop transmitting. */ static void atmel_stop_tx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (atmel_use_dma_tx(port)) { /* disable PDC transmit */ UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); } /* Disable interrupts */ UART_PUT_IDR(port, atmel_port->tx_done_mask); if (atmel_port->rs485.flags & SER_RS485_ENABLED) atmel_start_rx(port); } /* * Start transmitting. */ static void atmel_start_tx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (atmel_use_dma_tx(port)) { if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN) /* The transmitter is already running. 
Yes, we really need this.*/ return; if (atmel_port->rs485.flags & SER_RS485_ENABLED) atmel_stop_rx(port); /* re-enable PDC transmit */ UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); } /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); } /* * start receiving - port is in process of being opened. */ static void atmel_start_rx(struct uart_port *port) { UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */ if (atmel_use_dma_rx(port)) { /* enable PDC controller */ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | port->read_status_mask); UART_PUT_PTCR(port, ATMEL_PDC_RXTEN); } else { UART_PUT_IER(port, ATMEL_US_RXRDY); } } /* * Stop receiving - port is in process of being closed. */ static void atmel_stop_rx(struct uart_port *port) { if (atmel_use_dma_rx(port)) { /* disable PDC receive */ UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS); UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | port->read_status_mask); } else { UART_PUT_IDR(port, ATMEL_US_RXRDY); } } /* * Enable modem status interrupts */ static void atmel_enable_ms(struct uart_port *port) { UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC | ATMEL_US_CTSIC); } /* * Control the transmission of a break signal */ static void atmel_break_ctl(struct uart_port *port, int break_state) { if (break_state != 0) UART_PUT_CR(port, ATMEL_US_STTBRK); /* start break */ else UART_PUT_CR(port, ATMEL_US_STPBRK); /* stop break */ } /* * Stores the incoming character in the ring buffer */ static void atmel_buffer_rx_char(struct uart_port *port, unsigned int status, unsigned int ch) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct circ_buf *ring = &atmel_port->rx_ring; struct atmel_uart_char *c; if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) /* Buffer overflow, ignore char */ return; c = &((struct atmel_uart_char *)ring->buf)[ring->head]; c->status = status; c->ch = ch; /* Make sure the character is stored before we update head. 
*/ smp_wmb(); ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); } /* * Deal with parity, framing and overrun errors. */ static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) { /* clear error */ UART_PUT_CR(port, ATMEL_US_RSTSTA); if (status & ATMEL_US_RXBRK) { /* ignore side-effect */ status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); port->icount.brk++; } if (status & ATMEL_US_PARE) port->icount.parity++; if (status & ATMEL_US_FRAME) port->icount.frame++; if (status & ATMEL_US_OVRE) port->icount.overrun++; } /* * Characters received (called from interrupt handler) */ static void atmel_rx_chars(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int status, ch; status = UART_GET_CSR(port); while (status & ATMEL_US_RXRDY) { ch = UART_GET_CHAR(port); /* * note that the error handling code is * out of the main execution path */ if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME | ATMEL_US_OVRE | ATMEL_US_RXBRK) || atmel_port->break_active)) { /* clear error */ UART_PUT_CR(port, ATMEL_US_RSTSTA); if (status & ATMEL_US_RXBRK && !atmel_port->break_active) { atmel_port->break_active = 1; UART_PUT_IER(port, ATMEL_US_RXBRK); } else { /* * This is either the end-of-break * condition or we've received at * least one character without RXBRK * being set. In both cases, the next * RXBRK will indicate start-of-break. 
*/ UART_PUT_IDR(port, ATMEL_US_RXBRK); status &= ~ATMEL_US_RXBRK; atmel_port->break_active = 0; } } atmel_buffer_rx_char(port, status, ch); status = UART_GET_CSR(port); } tasklet_schedule(&atmel_port->tasklet); } /* * Transmit characters (called from tasklet with TXRDY interrupt * disabled) */ static void atmel_tx_chars(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) { UART_PUT_CHAR(port, port->x_char); port->icount.tx++; port->x_char = 0; } if (uart_circ_empty(xmit) || uart_tx_stopped(port)) return; while (UART_GET_CSR(port) & atmel_port->tx_done_mask) { UART_PUT_CHAR(port, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (!uart_circ_empty(xmit)) /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); } /* * receive interrupt handler. */ static void atmel_handle_receive(struct uart_port *port, unsigned int pending) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (atmel_use_dma_rx(port)) { /* * PDC receive. Just schedule the tasklet and let it * figure out the details. * * TODO: We're not handling error flags correctly at * the moment. */ if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { UART_PUT_IDR(port, (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); tasklet_schedule(&atmel_port->tasklet); } if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | ATMEL_US_FRAME | ATMEL_US_PARE)) atmel_pdc_rxerr(port, pending); } /* Interrupt receive */ if (pending & ATMEL_US_RXRDY) atmel_rx_chars(port); else if (pending & ATMEL_US_RXBRK) { /* * End of break detected. If it came along with a * character, atmel_rx_chars will handle it. 
*/ UART_PUT_CR(port, ATMEL_US_RSTSTA); UART_PUT_IDR(port, ATMEL_US_RXBRK); atmel_port->break_active = 0; } } /* * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) */ static void atmel_handle_transmit(struct uart_port *port, unsigned int pending) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (pending & atmel_port->tx_done_mask) { /* Either PDC or interrupt transmission */ UART_PUT_IDR(port, atmel_port->tx_done_mask); tasklet_schedule(&atmel_port->tasklet); } } /* * status flags interrupt handler. */ static void atmel_handle_status(struct uart_port *port, unsigned int pending, unsigned int status) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC | ATMEL_US_CTSIC)) { atmel_port->irq_status = status; tasklet_schedule(&atmel_port->tasklet); } } /* * Interrupt handler */ static irqreturn_t atmel_interrupt(int irq, void *dev_id) { struct uart_port *port = dev_id; unsigned int status, pending, pass_counter = 0; do { status = UART_GET_CSR(port); pending = status & UART_GET_IMR(port); if (!pending) break; atmel_handle_receive(port, pending); atmel_handle_status(port, pending, status); atmel_handle_transmit(port, pending); } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); return pass_counter ? IRQ_HANDLED : IRQ_NONE; } /* * Called from tasklet with ENDTX and TXBUFE interrupts disabled. */ static void atmel_tx_dma(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct circ_buf *xmit = &port->state->xmit; struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; int count; /* nothing left to transmit? 
*/ if (UART_GET_TCR(port)) return; xmit->tail += pdc->ofs; xmit->tail &= UART_XMIT_SIZE - 1; port->icount.tx += pdc->ofs; pdc->ofs = 0; /* more to transmit - setup next transfer */ /* disable PDC transmit */ UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { dma_sync_single_for_device(port->dev, pdc->dma_addr, pdc->dma_size, DMA_TO_DEVICE); count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); pdc->ofs = count; UART_PUT_TPR(port, pdc->dma_addr + xmit->tail); UART_PUT_TCR(port, count); /* re-enable PDC transmit */ UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); } else { if (atmel_port->rs485.flags & SER_RS485_ENABLED) { /* DMA done, stop TX, start RX for RS485 */ atmel_start_rx(port); } } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } static void atmel_rx_from_ring(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct circ_buf *ring = &atmel_port->rx_ring; unsigned int flg; unsigned int status; while (ring->head != ring->tail) { struct atmel_uart_char c; /* Make sure c is loaded after head. 
*/ smp_rmb(); c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); port->icount.rx++; status = c.status; flg = TTY_NORMAL; /* * note that the error handling code is * out of the main execution path */ if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { if (status & ATMEL_US_RXBRK) { /* ignore side-effect */ status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); port->icount.brk++; if (uart_handle_break(port)) continue; } if (status & ATMEL_US_PARE) port->icount.parity++; if (status & ATMEL_US_FRAME) port->icount.frame++; if (status & ATMEL_US_OVRE) port->icount.overrun++; status &= port->read_status_mask; if (status & ATMEL_US_RXBRK) flg = TTY_BREAK; else if (status & ATMEL_US_PARE) flg = TTY_PARITY; else if (status & ATMEL_US_FRAME) flg = TTY_FRAME; } if (uart_handle_sysrq_char(port, c.ch)) continue; uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); } /* * Drop the lock here since it might end up calling * uart_start(), which takes the lock. */ spin_unlock(&port->lock); tty_flip_buffer_push(port->state->port.tty); spin_lock(&port->lock); } static void atmel_rx_from_dma(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct tty_struct *tty = port->state->port.tty; struct atmel_dma_buffer *pdc; int rx_idx = atmel_port->pdc_rx_idx; unsigned int head; unsigned int tail; unsigned int count; do { /* Reset the UART timeout early so that we don't miss one */ UART_PUT_CR(port, ATMEL_US_STTTO); pdc = &atmel_port->pdc_rx[rx_idx]; head = UART_GET_RPR(port) - pdc->dma_addr; tail = pdc->ofs; /* If the PDC has switched buffers, RPR won't contain * any address within the current buffer. Since head * is unsigned, we just need a one-way comparison to * find out. * * In this case, we just need to consume the entire * buffer and resubmit it for DMA. This will clear the * ENDRX bit as well, so that we can safely re-enable * all interrupts below. 
*/ head = min(head, pdc->dma_size); if (likely(head != tail)) { dma_sync_single_for_cpu(port->dev, pdc->dma_addr, pdc->dma_size, DMA_FROM_DEVICE); /* * head will only wrap around when we recycle * the DMA buffer, and when that happens, we * explicitly set tail to 0. So head will * always be greater than tail. */ count = head - tail; tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count); dma_sync_single_for_device(port->dev, pdc->dma_addr, pdc->dma_size, DMA_FROM_DEVICE); port->icount.rx += count; pdc->ofs = head; } /* * If the current buffer is full, we need to check if * the next one contains any additional data. */ if (head >= pdc->dma_size) { pdc->ofs = 0; UART_PUT_RNPR(port, pdc->dma_addr); UART_PUT_RNCR(port, pdc->dma_size); rx_idx = !rx_idx; atmel_port->pdc_rx_idx = rx_idx; } } while (head >= pdc->dma_size); /* * Drop the lock here since it might end up calling * uart_start(), which takes the lock. */ spin_unlock(&port->lock); tty_flip_buffer_push(tty); spin_lock(&port->lock); UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); } /* * tasklet handling tty stuff outside the interrupt handler. */ static void atmel_tasklet_func(unsigned long data) { struct uart_port *port = (struct uart_port *)data; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int status; unsigned int status_change; /* The interrupt handler does not take the lock */ spin_lock(&port->lock); if (atmel_use_dma_tx(port)) atmel_tx_dma(port); else atmel_tx_chars(port); status = atmel_port->irq_status; status_change = status ^ atmel_port->irq_status_prev; if (status_change & (ATMEL_US_RI | ATMEL_US_DSR | ATMEL_US_DCD | ATMEL_US_CTS)) { /* TODO: All reads to CSR will clear these interrupts! 
*/ if (status_change & ATMEL_US_RI) port->icount.rng++; if (status_change & ATMEL_US_DSR) port->icount.dsr++; if (status_change & ATMEL_US_DCD) uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); if (status_change & ATMEL_US_CTS) uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); wake_up_interruptible(&port->state->port.delta_msr_wait); atmel_port->irq_status_prev = status; } if (atmel_use_dma_rx(port)) atmel_rx_from_dma(port); else atmel_rx_from_ring(port); spin_unlock(&port->lock); } /* * Perform initialization and enable port for reception */ static int atmel_startup(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct tty_struct *tty = port->state->port.tty; int retval; /* * Ensure that no interrupts are enabled otherwise when * request_irq() is called we could get stuck trying to * handle an unexpected interrupt */ UART_PUT_IDR(port, -1); /* * Allocate the IRQ */ retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, tty ? tty->name : "atmel_serial", port); if (retval) { printk("atmel_serial: atmel_startup - Can't get irq\n"); return retval; } /* * Initialize DMA (if necessary) */ if (atmel_use_dma_rx(port)) { int i; for (i = 0; i < 2; i++) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); if (pdc->buf == NULL) { if (i != 0) { dma_unmap_single(port->dev, atmel_port->pdc_rx[0].dma_addr, PDC_BUFFER_SIZE, DMA_FROM_DEVICE); kfree(atmel_port->pdc_rx[0].buf); } free_irq(port->irq, port); return -ENOMEM; } pdc->dma_addr = dma_map_single(port->dev, pdc->buf, PDC_BUFFER_SIZE, DMA_FROM_DEVICE); pdc->dma_size = PDC_BUFFER_SIZE; pdc->ofs = 0; } atmel_port->pdc_rx_idx = 0; UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr); UART_PUT_RCR(port, PDC_BUFFER_SIZE); UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr); UART_PUT_RNCR(port, PDC_BUFFER_SIZE); } if (atmel_use_dma_tx(port)) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; struct circ_buf *xmit = 
&port->state->xmit; pdc->buf = xmit->buf; pdc->dma_addr = dma_map_single(port->dev, pdc->buf, UART_XMIT_SIZE, DMA_TO_DEVICE); pdc->dma_size = UART_XMIT_SIZE; pdc->ofs = 0; } /* * If there is a specific "open" function (to register * control line interrupts) */ if (atmel_open_hook) { retval = atmel_open_hook(port); if (retval) { free_irq(port->irq, port); return retval; } } /* Save current CSR for comparison in atmel_tasklet_func() */ atmel_port->irq_status_prev = UART_GET_CSR(port); atmel_port->irq_status = atmel_port->irq_status_prev; /* * Finally, enable the serial port */ UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); /* enable xmit & rcvr */ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); if (atmel_use_dma_rx(port)) { /* set UART timeout */ UART_PUT_RTOR(port, PDC_RX_TIMEOUT); UART_PUT_CR(port, ATMEL_US_STTTO); UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); /* enable PDC controller */ UART_PUT_PTCR(port, ATMEL_PDC_RXTEN); } else { /* enable receive only */ UART_PUT_IER(port, ATMEL_US_RXRDY); } return 0; } /* * Disable the port */ static void atmel_shutdown(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); /* * Ensure everything is stopped. */ atmel_stop_rx(port); atmel_stop_tx(port); /* * Shut-down the DMA. */ if (atmel_use_dma_rx(port)) { int i; for (i = 0; i < 2; i++) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; dma_unmap_single(port->dev, pdc->dma_addr, pdc->dma_size, DMA_FROM_DEVICE); kfree(pdc->buf); } } if (atmel_use_dma_tx(port)) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; dma_unmap_single(port->dev, pdc->dma_addr, pdc->dma_size, DMA_TO_DEVICE); } /* * Disable all interrupts, port and break condition. 
*/ UART_PUT_CR(port, ATMEL_US_RSTSTA); UART_PUT_IDR(port, -1); /* * Free the interrupt */ free_irq(port->irq, port); /* * If there is a specific "close" function (to unregister * control line interrupts) */ if (atmel_close_hook) atmel_close_hook(port); } /* * Flush any TX data submitted for DMA. Called when the TX circular * buffer is reset. */ static void atmel_flush_buffer(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (atmel_use_dma_tx(port)) { UART_PUT_TCR(port, 0); atmel_port->pdc_tx.ofs = 0; } } /* * Power / Clock management. */ static void atmel_serial_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); switch (state) { case 0: /* * Enable the peripheral clock for this serial port. * This is called on uart_open() or a resume event. */ clk_enable(atmel_port->clk); /* re-enable interrupts if we disabled some on suspend */ UART_PUT_IER(port, atmel_port->backup_imr); break; case 3: /* Back up the interrupt mask and disable all interrupts */ atmel_port->backup_imr = UART_GET_IMR(port); UART_PUT_IDR(port, -1); /* * Disable the peripheral clock for this serial port. * This is called on uart_close() or a suspend event. 
*/ clk_disable(atmel_port->clk); break; default: printk(KERN_ERR "atmel_serial: unknown pm %d\n", state); } } /* * Change the port parameters */ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned long flags; unsigned int mode, imr, quot, baud; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); /* Get current mode register */ mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_USMODE); baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); quot = uart_get_divisor(port, baud); if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */ quot /= 8; mode |= ATMEL_US_USCLKS_MCK_DIV8; } /* byte size */ switch (termios->c_cflag & CSIZE) { case CS5: mode |= ATMEL_US_CHRL_5; break; case CS6: mode |= ATMEL_US_CHRL_6; break; case CS7: mode |= ATMEL_US_CHRL_7; break; default: mode |= ATMEL_US_CHRL_8; break; } /* stop bits */ if (termios->c_cflag & CSTOPB) mode |= ATMEL_US_NBSTOP_2; /* parity */ if (termios->c_cflag & PARENB) { /* Mark or Space parity */ if (termios->c_cflag & CMSPAR) { if (termios->c_cflag & PARODD) mode |= ATMEL_US_PAR_MARK; else mode |= ATMEL_US_PAR_SPACE; } else if (termios->c_cflag & PARODD) mode |= ATMEL_US_PAR_ODD; else mode |= ATMEL_US_PAR_EVEN; } else mode |= ATMEL_US_PAR_NONE; /* hardware handshake (RTS/CTS) */ if (termios->c_cflag & CRTSCTS) mode |= ATMEL_US_USMODE_HWHS; else mode |= ATMEL_US_USMODE_NORMAL; spin_lock_irqsave(&port->lock, flags); port->read_status_mask = ATMEL_US_OVRE; if (termios->c_iflag & INPCK) port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); if (termios->c_iflag & (BRKINT | PARMRK)) port->read_status_mask |= ATMEL_US_RXBRK; if (atmel_use_dma_rx(port)) /* need to enable error interrupts */ UART_PUT_IER(port, port->read_status_mask); /* * Characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= (ATMEL_US_FRAME | 
ATMEL_US_PARE); if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= ATMEL_US_RXBRK; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= ATMEL_US_OVRE; } /* TODO: Ignore all characters if CREAD is set.*/ /* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); /* * save/disable interrupts. The tty layer will ensure that the * transmitter is empty if requested by the caller, so there's * no need to wait for it here. */ imr = UART_GET_IMR(port); UART_PUT_IDR(port, -1); /* disable receiver and transmitter */ UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS); /* Resetting serial mode to RS232 (0x0) */ mode &= ~ATMEL_US_USMODE; if (atmel_port->rs485.flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send); mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); } /* set the parity, stop bits and data size */ UART_PUT_MR(port, mode); /* set the baud rate */ UART_PUT_BRGR(port, quot); UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); /* restore interrupts */ UART_PUT_IER(port, imr); /* CTS flow-control and modem-status interrupts */ if (UART_ENABLE_MS(port, termios->c_cflag)) port->ops->enable_ms(port); spin_unlock_irqrestore(&port->lock, flags); } /* * Return string describing the specified port */ static const char *atmel_type(struct uart_port *port) { return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; } /* * Release the memory region(s) being used by 'port'. 
*/ static void atmel_release_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); int size = pdev->resource[0].end - pdev->resource[0].start + 1; release_mem_region(port->mapbase, size); if (port->flags & UPF_IOREMAP) { iounmap(port->membase); port->membase = NULL; } } /* * Request the memory region(s) being used by 'port'. */ static int atmel_request_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); int size = pdev->resource[0].end - pdev->resource[0].start + 1; if (!request_mem_region(port->mapbase, size, "atmel_serial")) return -EBUSY; if (port->flags & UPF_IOREMAP) { port->membase = ioremap(port->mapbase, size); if (port->membase == NULL) { release_mem_region(port->mapbase, size); return -ENOMEM; } } return 0; } /* * Configure/autoconfigure the port. */ static void atmel_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = PORT_ATMEL; atmel_request_port(port); } } /* * Verify the new serial_struct (for TIOCSSERIAL). 
*/ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) ret = -EINVAL; if (port->irq != ser->irq) ret = -EINVAL; if (ser->io_type != SERIAL_IO_MEM) ret = -EINVAL; if (port->uartclk / 16 != ser->baud_base) ret = -EINVAL; if ((void *)port->mapbase != ser->iomem_base) ret = -EINVAL; if (port->iobase != ser->port) ret = -EINVAL; if (ser->hub6 != 0) ret = -EINVAL; return ret; } #ifdef CONFIG_CONSOLE_POLL static int atmel_poll_get_char(struct uart_port *port) { while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY)) cpu_relax(); return UART_GET_CHAR(port); } static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) { while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY)) cpu_relax(); UART_PUT_CHAR(port, ch); } #endif static int atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg) { struct serial_rs485 rs485conf; switch (cmd) { case TIOCSRS485: if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg, sizeof(rs485conf))) return -EFAULT; atmel_config_rs485(port, &rs485conf); break; case TIOCGRS485: if (copy_to_user((struct serial_rs485 *) arg, &(to_atmel_uart_port(port)->rs485), sizeof(rs485conf))) return -EFAULT; break; default: return -ENOIOCTLCMD; } return 0; } static struct uart_ops atmel_pops = { .tx_empty = atmel_tx_empty, .set_mctrl = atmel_set_mctrl, .get_mctrl = atmel_get_mctrl, .stop_tx = atmel_stop_tx, .start_tx = atmel_start_tx, .stop_rx = atmel_stop_rx, .enable_ms = atmel_enable_ms, .break_ctl = atmel_break_ctl, .startup = atmel_startup, .shutdown = atmel_shutdown, .flush_buffer = atmel_flush_buffer, .set_termios = atmel_set_termios, .type = atmel_type, .release_port = atmel_release_port, .request_port = atmel_request_port, .config_port = atmel_config_port, .verify_port = atmel_verify_port, .pm = atmel_serial_pm, .ioctl = atmel_ioctl, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = atmel_poll_get_char, .poll_put_char = atmel_poll_put_char, 
#endif }; /* * Configure the port from the platform device resource info. */ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port, struct platform_device *pdev) { struct uart_port *port = &atmel_port->uart; struct atmel_uart_data *data = pdev->dev.platform_data; port->iotype = UPIO_MEM; port->flags = UPF_BOOT_AUTOCONF; port->ops = &atmel_pops; port->fifosize = 1; port->line = pdev->id; port->dev = &pdev->dev; port->mapbase = pdev->resource[0].start; port->irq = pdev->resource[1].start; tasklet_init(&atmel_port->tasklet, atmel_tasklet_func, (unsigned long)port); memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); if (data->regs) /* Already mapped by setup code */ port->membase = data->regs; else { port->flags |= UPF_IOREMAP; port->membase = NULL; } /* for console, the clock could already be configured */ if (!atmel_port->clk) { atmel_port->clk = clk_get(&pdev->dev, "usart"); clk_enable(atmel_port->clk); port->uartclk = clk_get_rate(atmel_port->clk); clk_disable(atmel_port->clk); /* only enable clock when USART is in use */ } atmel_port->use_dma_rx = data->use_dma_rx; atmel_port->use_dma_tx = data->use_dma_tx; atmel_port->rs485 = data->rs485; /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */ if (atmel_port->rs485.flags & SER_RS485_ENABLED) atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; else if (atmel_use_dma_tx(port)) { port->fifosize = PDC_BUFFER_SIZE; atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; } else { atmel_port->tx_done_mask = ATMEL_US_TXRDY; } } /* * Register board-specific modem-control line handlers. 
*/ void __init atmel_register_uart_fns(struct atmel_port_fns *fns) { if (fns->enable_ms) atmel_pops.enable_ms = fns->enable_ms; if (fns->get_mctrl) atmel_pops.get_mctrl = fns->get_mctrl; if (fns->set_mctrl) atmel_pops.set_mctrl = fns->set_mctrl; atmel_open_hook = fns->open; atmel_close_hook = fns->close; atmel_pops.pm = fns->pm; atmel_pops.set_wake = fns->set_wake; } #ifdef CONFIG_SERIAL_ATMEL_CONSOLE static void atmel_console_putchar(struct uart_port *port, int ch) { while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY)) cpu_relax(); UART_PUT_CHAR(port, ch); } /* * Interrupts are disabled on entering */ static void atmel_console_write(struct console *co, const char *s, u_int count) { struct uart_port *port = &atmel_ports[co->index].uart; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int status, imr; unsigned int pdc_tx; /* * First, save IMR and then disable interrupts */ imr = UART_GET_IMR(port); UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask); /* Store PDC transmit status and disable it */ pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN; UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); uart_console_write(port, s, count, atmel_console_putchar); /* * Finally, wait for transmitter to become empty * and restore IMR */ do { status = UART_GET_CSR(port); } while (!(status & ATMEL_US_TXRDY)); /* Restore PDC transmit status */ if (pdc_tx) UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); /* set interrupts back the way they were */ UART_PUT_IER(port, imr); } /* * If the port was already initialised (eg, by a boot loader), * try to determine the current setup. */ static void __init atmel_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits) { unsigned int mr, quot; /* * If the baud rate generator isn't running, the port wasn't * initialized by the boot loader. 
*/ quot = UART_GET_BRGR(port) & ATMEL_US_CD; if (!quot) return; mr = UART_GET_MR(port) & ATMEL_US_CHRL; if (mr == ATMEL_US_CHRL_8) *bits = 8; else *bits = 7; mr = UART_GET_MR(port) & ATMEL_US_PAR; if (mr == ATMEL_US_PAR_EVEN) *parity = 'e'; else if (mr == ATMEL_US_PAR_ODD) *parity = 'o'; /* * The serial core only rounds down when matching this to a * supported baud rate. Make sure we don't end up slightly * lower than one of those, as it would make us fall through * to a much lower baud rate than we really want. */ *baud = port->uartclk / (16 * (quot - 1)); } static int __init atmel_console_setup(struct console *co, char *options) { struct uart_port *port = &atmel_ports[co->index].uart; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; if (port->membase == NULL) { /* Port not initialized yet - delay setup */ return -ENODEV; } clk_enable(atmel_ports[co->index].clk); UART_PUT_IDR(port, -1); UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else atmel_console_get_options(port, &baud, &parity, &bits); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver atmel_uart; static struct console atmel_console = { .name = ATMEL_DEVICENAME, .write = atmel_console_write, .device = uart_console_device, .setup = atmel_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &atmel_uart, }; #define ATMEL_CONSOLE_DEVICE (&atmel_console) /* * Early console initialization (before VM subsystem initialized). */ static int __init atmel_console_init(void) { if (atmel_default_console_device) { add_preferred_console(ATMEL_DEVICENAME, atmel_default_console_device->id, NULL); atmel_init_port(&atmel_ports[atmel_default_console_device->id], atmel_default_console_device); register_console(&atmel_console); } return 0; } console_initcall(atmel_console_init); /* * Late console initialization. 
*/ static int __init atmel_late_console_init(void) { if (atmel_default_console_device && !(atmel_console.flags & CON_ENABLED)) register_console(&atmel_console); return 0; } core_initcall(atmel_late_console_init); static inline bool atmel_is_console_port(struct uart_port *port) { return port->cons && port->cons->index == port->line; } #else #define ATMEL_CONSOLE_DEVICE NULL static inline bool atmel_is_console_port(struct uart_port *port) { return false; } #endif static struct uart_driver atmel_uart = { .owner = THIS_MODULE, .driver_name = "atmel_serial", .dev_name = ATMEL_DEVICENAME, .major = SERIAL_ATMEL_MAJOR, .minor = MINOR_START, .nr = ATMEL_MAX_UART, .cons = ATMEL_CONSOLE_DEVICE, }; #ifdef CONFIG_PM static bool atmel_serial_clk_will_stop(void) { #ifdef CONFIG_ARCH_AT91 return at91_suspend_entering_slow_clock(); #else return false; #endif } static int atmel_serial_suspend(struct platform_device *pdev, pm_message_t state) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (atmel_is_console_port(port) && console_suspend_enabled) { /* Drain the TX shifter */ while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY)) cpu_relax(); } /* we can not wake up if we're running on slow clock */ atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); if (atmel_serial_clk_will_stop()) device_set_wakeup_enable(&pdev->dev, 0); uart_suspend_port(&atmel_uart, port); return 0; } static int atmel_serial_resume(struct platform_device *pdev) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); uart_resume_port(&atmel_uart, port); device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); return 0; } #else #define atmel_serial_suspend NULL #define atmel_serial_resume NULL #endif static int __devinit atmel_serial_probe(struct platform_device *pdev) { struct atmel_uart_port *port; void *data; int ret; BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE 
- 1)); port = &atmel_ports[pdev->id]; port->backup_imr = 0; atmel_init_port(port, pdev); if (!atmel_use_dma_rx(&port->uart)) { ret = -ENOMEM; data = kmalloc(sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL); if (!data) goto err_alloc_ring; port->rx_ring.buf = data; } ret = uart_add_one_port(&atmel_uart, &port->uart); if (ret) goto err_add_port; #ifdef CONFIG_SERIAL_ATMEL_CONSOLE if (atmel_is_console_port(&port->uart) && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { /* * The serial core enabled the clock for us, so undo * the clk_enable() in atmel_console_setup() */ clk_disable(port->clk); } #endif device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, port); return 0; err_add_port: kfree(port->rx_ring.buf); port->rx_ring.buf = NULL; err_alloc_ring: if (!atmel_is_console_port(&port->uart)) { clk_put(port->clk); port->clk = NULL; } return ret; } static int __devexit atmel_serial_remove(struct platform_device *pdev) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); int ret = 0; device_init_wakeup(&pdev->dev, 0); platform_set_drvdata(pdev, NULL); ret = uart_remove_one_port(&atmel_uart, port); tasklet_kill(&atmel_port->tasklet); kfree(atmel_port->rx_ring.buf); /* "port" is allocated statically, so we shouldn't free it */ clk_put(atmel_port->clk); return ret; } static struct platform_driver atmel_serial_driver = { .probe = atmel_serial_probe, .remove = __devexit_p(atmel_serial_remove), .suspend = atmel_serial_suspend, .resume = atmel_serial_resume, .driver = { .name = "atmel_usart", .owner = THIS_MODULE, }, }; static int __init atmel_serial_init(void) { int ret; ret = uart_register_driver(&atmel_uart); if (ret) return ret; ret = platform_driver_register(&atmel_serial_driver); if (ret) uart_unregister_driver(&atmel_uart); return ret; } static void __exit atmel_serial_exit(void) { platform_driver_unregister(&atmel_serial_driver); uart_unregister_driver(&atmel_uart); } 
module_init(atmel_serial_init); module_exit(atmel_serial_exit); MODULE_AUTHOR("Rick Bronson"); MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:atmel_usart");
gpl-2.0
1tush/huawei_u8850_kernel_ics
fs/ext3/inode.c
758
103106
/* * linux/fs/ext3/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie * (sct@redhat.com), 1993, 1998 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 */ #include <linux/module.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/ext3_jbd.h> #include <linux/jbd.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/mpage.h> #include <linux/uio.h> #include <linux/bio.h> #include <linux/fiemap.h> #include <linux/namei.h> #include "xattr.h" #include "acl.h" static int ext3_writepage_trans_blocks(struct inode *inode); /* * Test whether an inode is a fast symlink. */ static int ext3_inode_is_fast_symlink(struct inode *inode) { int ea_blocks = EXT3_I(inode)->i_file_acl ? (inode->i_sb->s_blocksize >> 9) : 0; return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } /* * The ext3 forget function must perform a revoke if we are freeing data * which has been journaled. Metadata (eg. indirect blocks) must be * revoked in all cases. * * "bh" may be NULL: a metadata block may have been freed from memory * but there may still be a record of it in the journal, and that record * still needs to be revoked. 
*/ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext3_fsblk_t blocknr) { int err; might_sleep(); BUFFER_TRACE(bh, "enter"); jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, " "data mode %lx\n", bh, is_metadata, inode->i_mode, test_opt(inode->i_sb, DATA_FLAGS)); /* Never use the revoke function if we are doing full data * journaling: there is no need to, and a V1 superblock won't * support it. Otherwise, only skip the revoke on un-journaled * data blocks. */ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA || (!is_metadata && !ext3_should_journal_data(inode))) { if (bh) { BUFFER_TRACE(bh, "call journal_forget"); return ext3_journal_forget(handle, bh); } return 0; } /* * data!=journal && (is_metadata || should_journal_data(inode)) */ BUFFER_TRACE(bh, "call ext3_journal_revoke"); err = ext3_journal_revoke(handle, blocknr, bh); if (err) ext3_abort(inode->i_sb, __func__, "error %d when attempting revoke", err); BUFFER_TRACE(bh, "exit"); return err; } /* * Work out how many blocks we need to proceed with the next chunk of a * truncate transaction. */ static unsigned long blocks_for_truncate(struct inode *inode) { unsigned long needed; needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); /* Give ourselves just enough room to cope with inodes in which * i_blocks is corrupt: we've seen disk corruptions in the past * which resulted in random data in an inode which looked enough * like a regular file for ext3 to try to delete it. Things * will go a bit crazy if that happens, but at least we should * try not to panic the whole kernel. */ if (needed < 2) needed = 2; /* But we need to bound the transaction so we don't overflow the * journal. */ if (needed > EXT3_MAX_TRANS_DATA) needed = EXT3_MAX_TRANS_DATA; return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed; } /* * Truncate transactions can be complex and absolutely huge. 
So we need to * be able to restart the transaction at a conventient checkpoint to make * sure we don't overflow the journal. * * start_transaction gets us a new handle for a truncate transaction, * and extend_transaction tries to extend the existing one a bit. If * extend fails, we need to propagate the failure up and restart the * transaction in the top-level truncate loop. --sct */ static handle_t *start_transaction(struct inode *inode) { handle_t *result; result = ext3_journal_start(inode, blocks_for_truncate(inode)); if (!IS_ERR(result)) return result; ext3_std_error(inode->i_sb, PTR_ERR(result)); return result; } /* * Try to extend this transaction for the purposes of truncation. * * Returns 0 if we managed to create more room. If we can't create more * room, and the transaction must be restarted we return 1. */ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) { if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS) return 0; if (!ext3_journal_extend(handle, blocks_for_truncate(inode))) return 0; return 1; } /* * Restart the transaction associated with *handle. This does a commit, * so before we call here everything must be consistently dirtied against * this transaction. */ static int truncate_restart_transaction(handle_t *handle, struct inode *inode) { int ret; jbd_debug(2, "restarting handle %p\n", handle); /* * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle * At this moment, get_block can be called only for blocks inside * i_size since page cache has been already dropped and writes are * blocked by i_mutex. So we can safely drop the truncate_mutex. */ mutex_unlock(&EXT3_I(inode)->truncate_mutex); ret = ext3_journal_restart(handle, blocks_for_truncate(inode)); mutex_lock(&EXT3_I(inode)->truncate_mutex); return ret; } /* * Called at the last iput() if i_nlink is zero. 
*/ void ext3_delete_inode (struct inode * inode) { handle_t *handle; if (!is_bad_inode(inode)) dquot_initialize(inode); truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) goto no_delete; handle = start_transaction(inode); if (IS_ERR(handle)) { /* * If we're going to skip the normal cleanup, we still need to * make sure that the in-core orphan linked list is properly * cleaned up. */ ext3_orphan_del(NULL, inode); goto no_delete; } if (IS_SYNC(inode)) handle->h_sync = 1; inode->i_size = 0; if (inode->i_blocks) ext3_truncate(inode); /* * Kill off the orphan record which ext3_truncate created. * AKPM: I think this can be inside the above `if'. * Note that ext3_orphan_del() has to be able to cope with the * deletion of a non-existent orphan - this is because we don't * know if ext3_truncate() actually created an orphan record. * (Well, we could do this if we need to, but heck - it works) */ ext3_orphan_del(handle, inode); EXT3_I(inode)->i_dtime = get_seconds(); /* * One subtle ordering requirement: if anything has gone wrong * (transaction abort, IO errors, whatever), then we can still * do these next steps (the fs will already have been marked as * having errors), but we can't free the inode if the mark_dirty * fails. */ if (ext3_mark_inode_dirty(handle, inode)) /* If that failed, just do the required in-core inode clear. */ clear_inode(inode); else ext3_free_inode(handle, inode); ext3_journal_stop(handle); return; no_delete: clear_inode(inode); /* We must guarantee clearing of inode... 
*/ } typedef struct { __le32 *p; __le32 key; struct buffer_head *bh; } Indirect; static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) { p->key = *(p->p = v); p->bh = bh; } static int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } /** * ext3_block_to_path - parse the block number into array of offsets * @inode: inode in question (we are only interested in its superblock) * @i_block: block number to be parsed * @offsets: array to store the offsets in * @boundary: set this non-zero if the referred-to block is likely to be * followed (on disk) by an indirect block. * * To store the locations of file's data ext3 uses a data structure common * for UNIX filesystems - tree of pointers anchored in the inode, with * data blocks at leaves and indirect blocks in intermediate nodes. * This function translates the block number into path in that tree - * return value is the path length and @offsets[n] is the offset of * pointer to (n+1)th node in the nth one. If @block is out of range * (negative or too large) warning is printed and zero returned. * * Note: function doesn't find node addresses, so no IO is needed. All * we need to know is the capacity of indirect blocks (taken from the * inode->i_sb). */ /* * Portability note: the last comparison (check that we fit into triple * indirect block) is spelled differently, because otherwise on an * architecture with 32-bit longs and 8Kb pages we might get into trouble * if our filesystem had 8Kb blocks. We might use long long, but that would * kill us on x86. Oh, well, at least the sign propagation does not matter - * i_block would have to be negative in the very beginning, so we would not * get there at all. 
*/ static int ext3_block_to_path(struct inode *inode, long i_block, int offsets[4], int *boundary) { int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb); int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb); const long direct_blocks = EXT3_NDIR_BLOCKS, indirect_blocks = ptrs, double_blocks = (1 << (ptrs_bits * 2)); int n = 0; int final = 0; if (i_block < 0) { ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0"); } else if (i_block < direct_blocks) { offsets[n++] = i_block; final = direct_blocks; } else if ( (i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = EXT3_IND_BLOCK; offsets[n++] = i_block; final = ptrs; } else if ((i_block -= indirect_blocks) < double_blocks) { offsets[n++] = EXT3_DIND_BLOCK; offsets[n++] = i_block >> ptrs_bits; offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { offsets[n++] = EXT3_TIND_BLOCK; offsets[n++] = i_block >> (ptrs_bits * 2); offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else { ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big"); } if (boundary) *boundary = final - 1 - (i_block & (ptrs - 1)); return n; } /** * ext3_get_branch - read the chain of indirect blocks leading to data * @inode: inode in question * @depth: depth of the chain (1 - direct pointer, etc.) * @offsets: offsets of pointers in inode/indirect blocks * @chain: place to store the result * @err: here we store the error value * * Function fills the array of triples <key, p, bh> and returns %NULL * if everything went OK or the pointer to the last filled triple * (incomplete one) otherwise. Upon the return chain[i].key contains * the number of (i+1)-th block in the chain (as it is stored in memory, * i.e. 
 *	little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it notices that chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		/* May sleep in I/O; a racing truncate can change the chain. */
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		/*
		 * Re-validate everything read so far before trusting the
		 * freshly read block; if the chain moved under us, report
		 * -EAGAIN so the caller retries.
		 */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext3_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the callers PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext3_fsblk_t bg_start;
	ext3_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}

/**
 *	ext3_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function find the preferred place for block allocation,
 *	returns it.
 */
static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
				   Indirect *partial)
{
	struct ext3_block_alloc_info *block_i;

	block_i = EXT3_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext3_find_near(inode, partial);
}

/**
 *	ext3_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks need for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocate, including the
 *	direct and indirect blocks.
 */
static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	/* Leaf level exists: extend over consecutive holes in the map. */
	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

/**
 *	ext3_alloc_blocks: multiple allocate blocks needed for a branch
 *	@indirect_blks: the number of blocks need to allocate for indirect
 *			blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int indirect_blks, int blks,
			ext3_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext3_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks need to allocate(required)
	 */
	target = blks + indirect_blks;

	/* Keep asking the allocator until all indirect blocks are covered. */
	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* Leftover count means the first direct block is covered too. */
		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* Give back whatever we managed to allocate before the failure. */
	for (i = 0; i < index; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}

/**
 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext3_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext3_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			int indirect_blks, int *blks, ext3_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext3_fsblk_t new_blocks[4];
	ext3_fsblk_t current_block;

	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		/* JBD: must get create access before modifying the buffer. */
		err = ext3_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n ; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	/* Free the indirect blocks one by one, then the direct run. */
	for (i = 0; i < indirect_blks; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);

	ext3_free_blocks(handle, inode, new_blocks[i], num);

	return err;
}

/**
 * ext3_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext3_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i;
	ext3_fsblk_t current_block;
	struct ext3_inode_info *ei = EXT3_I(inode);

	block_i = ei->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist find the proper goal block for next
	 * allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);
	/* ext3_mark_inode_dirty already updated i_sync_tid */
	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	/* Undo the branch: forget journaled buffers and free the blocks. */
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
	}
	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
		sector_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext3_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext3_inode_info *ei = EXT3_I(inode);
	int count = 0;
	ext3_fsblk_t first_block = 0;


	J_ASSERT(handle != NULL || create == 0);
	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext3_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now. Flag the err as EAGAIN, so it
				 * will reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));

			/* Only extend over physically contiguous blocks. */
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/* Serialise against truncate and other allocators from here on. */
	mutex_lock(&ei->truncate_mutex);

	/*
	 * If the indirect block is missing while we are reading
	 * the chain(ext3_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			/* Someone else allocated it meanwhile - just map it. */
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext3_init_block_alloc_info(inode);

	goal = ext3_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext3_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

/* Maximum number of blocks we map for direct IO at once.
 */
#define DIO_MAX_BLOCKS 4096
/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 */
#define DIO_CREDITS 25

/*
 * get_block callback for the generic block/page code.  Reuses a running
 * transaction when one exists; otherwise (direct IO write path) it starts
 * and stops its own.
 */
static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext3_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (create && !handle) {	/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		handle = ext3_journal_start(inode, DIO_CREDITS +
				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext3_get_blocks_handle(handle, inode, iblock,
					max_blocks, bh_result, create);
	if (ret > 0) {
		/* Report how many contiguous blocks were mapped. */
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext3_journal_stop(handle);
out:
	return ret;
}

int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext3_get_block);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext3_get_blocks_handle(handle, inode, block, 1,
					&dummy, create);
	/*
	 * ext3_get_blocks_handle() returns number of blocks
	 * mapped. 0 in case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext3_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				/* Freshly allocated block: zero-fill it. */
				memset(bh->b_data,0,inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

/*
 * Like ext3_getblk(), but additionally reads the block from disk when it
 * is not already uptodate.  Returns NULL (with *err set) on read failure.
 */
struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
			       int block, int create, int *err)
{
	struct buffer_head * bh;

	bh = ext3_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

/*
 * Apply fn() to every buffer of the page that overlaps [from, to).
 * *partial is set when a buffer outside the range is not uptodate.
 * Returns the first non-zero fn() result, else 0.
 */
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	/* Circular list: stop when we come back around to head. */
	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	/* Unmapped or freed buffers need no journal write access. */
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext3_journal_get_write_access(handle, bh);
}

/*
 * Truncate blocks that were not used by write. We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext3_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext3_truncate(inode);
}

static int ext3_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	/* Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason */
	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext3_get_block);
	if (ret)
		goto write_begin_failed;

	if (ext3_should_journal_data(inode)) {
		/* data=journal: buffers must get journal write access now. */
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
write_begin_failed:
	if (ret) {
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before truncate
		 * finishes. Do this only if ext3_can_truncate() agrees so
		 * that orphan processing code is happy.
		 */
		if (pos + len > inode->i_size && ext3_can_truncate(inode))
			ext3_orphan_add(handle, inode);
		ext3_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
		if (pos + len > inode->i_size)
			ext3_truncate_failed_write(inode);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_dirty_data(handle, bh);
	if (err)
		ext3_journal_abort_handle(__func__, __func__,
						bh, handle, err);
	return err;
}

/* For ordered writepage and write_end functions */
static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
	/*
	 * Write could have mapped the buffer but it didn't copy the data in
	 * yet. So avoid filing such buffer into a transaction.
	 */
	if (buffer_mapped(bh) && buffer_uptodate(bh))
		return ext3_journal_dirty_data(handle, bh);
	return 0;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
 * for the whole page but later we failed to copy the data in. Update inode
 * size according to what we managed to copy. The rest is going to be
 * truncated in write_end function.
 */
static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
{
	/* What matters to us is i_disksize. We don't write i_size anywhere */
	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	if (pos + copied > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = pos + copied;
		mark_inode_dirty(inode);
	}
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext3_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + copied;
	/* data=ordered: file the data buffers in the running transaction. */
	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, journal_dirty_data_fn);

	if (ret == 0)
		update_file_sizes(inode, pos, copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}

static int ext3_writeback_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	int ret;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	update_file_sizes(inode, pos, copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ret = ext3_journal_stop(handle);
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}

static int ext3_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		/* Short copy: zero the buffers we never filled in. */
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from + copied, to);
		to = from + copied;
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zero's written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);
		if (err)
			return 0;
	}

	return generic_block_bmap(mapping,block,ext3_get_block);
}

/* Pin a buffer across block_write_full_page(); used via walk_page_buffers(). */
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

/* Drop the reference taken by bget_one(). */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

/* Returns non-zero when the buffer has no disk mapping yet. */
static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext3_writepage()
 *
 * Similar for:
 *
 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().
We will deadlock on various things like
 * lock_journal and i_truncate_mutex.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
/*
 * ->writepage for data=ordered mode: write the page's data blocks and
 * attach its buffers to the running transaction so they are flushed
 * before the transaction's metadata commits.
 */
static int ext3_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	WARN_ON_ONCE(IS_RDONLY(inode));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
		page_bufs = page_buffers(page);
	} else {
		page_bufs = page_buffers(page);
		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
				       NULL, buffer_unmapped)) {
			/* Provide NULL get_block() to catch bugs if buffers
			 * weren't really mapped */
			return block_write_full_page(page, NULL, wbc);
		}
	}
	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	/* Pin the buffers so they survive page unlock/truncate below. */
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_bufs is
	 * safe due to elevated refcount.
	 */

	/*
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

/*
 * ->writepage for data=writeback mode: no data ordering guarantees; the
 * transaction is opened only to journal any hole instantiation done by
 * ext3_get_block() (see the big comment above).
 */
static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	WARN_ON_ONCE(IS_RDONLY(inode));

	if (ext3_journal_current_handle())
		goto out_fail;

	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0,
				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
			/* Provide NULL get_block() to catch bugs if buffers
			 * weren't really mapped */
			return block_write_full_page(page, NULL, wbc);
		}
	}

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
		ret = nobh_writepage(page, ext3_get_block, wbc);
	else
		ret = block_write_full_page(page, ext3_get_block, wbc);

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

/*
 * ->writepage for data=journal mode: the page's data blocks themselves
 * are journalled as metadata.  A page marked PageChecked was dirtied via
 * mmap (see ext3_journalled_set_page_dirty below) and must be journalled
 * here.
 */
static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	WARN_ON_ONCE(IS_RDONLY(inode));

	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		/* Remember there is journalled data so bmap flushes first. */
		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}

/* ->readpage: plain mpage read using ext3's block mapper. */
static int ext3_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext3_get_block);
}

/* ->readpages: readahead batch read using ext3's block mapper. */
static int
ext3_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}

/* ->invalidatepage: hand the (partially) invalidated page to the journal. */
static void ext3_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	journal_invalidatepage(journal, page, offset);
}

/* ->releasepage: let the journal decide whether the buffers can go. */
static int ext3_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	return journal_try_to_free_buffers(journal, page, wait);
}

/*
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext3_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			/*
			 * Put the inode on the orphan list so a crash
			 * mid-write is truncated back to i_size on recovery.
			 */
			ret = ext3_orphan_add(handle, inode);
			if (ret) {
				ext3_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext3_journal_stop(handle);
		}
	}

retry:
	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext3_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Truncate allocated blocks
			 * and pretend the write failed... */
			ext3_truncate(inode);
			ret = PTR_ERR(handle);
			goto out;
		}
		if (inode->i_nlink)
			ext3_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext3_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Pages can be marked dirty completely asynchronously from ext3's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations ext3_ordered_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_ordered_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_ordered_write_end,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.direct_IO		= ext3_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext3_writeback_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_writeback_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_writeback_write_end,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.direct_IO		= ext3_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

/* NB: no ->direct_IO for data=journal mode; O_DIRECT falls back to buffered. */
static const struct address_space_operations ext3_journalled_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_journalled_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_journalled_write_end,
	.set_page_dirty		= ext3_journalled_set_page_dirty,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

/* Select the aops table matching the inode's data journalling mode. */
void ext3_set_aops(struct inode *inode)
{
	if (ext3_should_order_data(inode))
		inode->i_mapping->a_ops = &ext3_ordered_aops;
	else if (ext3_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext3_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext3_journalled_aops;
}

/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This required during truncate. We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
static int ext3_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;

	/* Length of the tail to zero, within the block containing `from'. */
	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * For "nobh" option, we can only work if we don't need to
	 * read-in the page - otherwise we create buffers to do the IO.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user(page, offset, length);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext3_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	/* In data=journal mode the zeroed data is itself metadata. */
	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
/* Return 1 iff every __le32 in [p, q) is zero. */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext3_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext3_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. Block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext3_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).
 */
static Indirect *ext3_find_shared(struct inode *inode, int depth,
			int offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	/* Walk back towards the inode past indirect blocks that are all-zero
	 * below the split point - those survive intact. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	/* Release buffer_heads for levels below the split point. */
	while(partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;
	/* Make room in the transaction before dirtying more metadata. */
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
		}
		ext3_mark_inode_dirty(handle, inode);
		truncate_restart_transaction(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext3_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We find
	 * them on the hash table so journal_revoke() will run journal_forget()
	 * on them.  We've already detached each block from the file, so
	 * bforget() in journal_forget() should be safe.
	 *
	 * AKPM: turn on bforget in journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;	/* shadows outer @bh on purpose */

			*p = 0;
			bh = sb_find_get_block(inode->i_sb, nr);
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}

/**
 * ext3_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext3_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* Run broken: flush it, start a new one. */
				ext3_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if (bh2jh(this_bh))
			ext3_journal_dirty_metadata(handle, this_bh);
		else
			ext3_error(inode->i_sb, "ext3_free_data",
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long)this_bh->b_blocknr);
	}
}

/**
 *	ext3_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext3_fsblk_t nr;
	__le32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext3_error(inode->i_sb, "ext3_free_branches",
					   "Read failure, inode=%lu, block="E3FSBLK,
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext3_free_branches(handle, inode, bh,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate.  But it's no longer
			 * needed and we now drop it from the transaction via
			 * journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction.  But if it's part of the committing
			 * transaction then journal_forget() will simply
			 * brelse() it.  That means that if the underlying
			 * block is reallocated in ext3_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it.  damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written.  And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext3_mark_inode_dirty(handle, inode);
				truncate_restart_transaction(handle, inode);
			}

			ext3_free_blocks(handle, inode, nr, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext3_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext3_journal_dirty_metadata");
					ext3_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext3_free_data(handle, inode, parent_bh, first, last);
	}
}

/* Can this inode's blocks be truncated at all?  (fast symlinks cannot) */
int ext3_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext3_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext3_truncate()
 *
 * We block out ext3_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext3_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext3 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext3_truncate() run will find them and release them.
 */
void ext3_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;
	struct page *page;

	if (!ext3_can_truncate(inode))
		goto out_notrans;

	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);

	/*
	 * We have to lock the EOF page here, because lock_page() nests
	 * outside journal_start().
	 */
	if ((inode->i_size & (blocksize - 1)) == 0) {
		/* Block boundary? Nothing to do */
		page = NULL;
	} else {
		page = grab_cache_page(mapping,
				inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			goto out_notrans;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		goto out_notrans;
	}

	last_block = (inode->i_size + blocksize-1)
					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

	if (page)
		ext3_block_truncate_page(handle, page, mapping, inode->i_size);

	n = ext3_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext3_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext3 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	/*
	 * From here we block out all ext3_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {		/* direct blocks */
		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT3_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext3_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT3_IND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT3_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_IND_BLOCK:
		nr = i_data[EXT3_DIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT3_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_DIND_BLOCK:
		nr = i_data[EXT3_TIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT3_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_TIND_BLOCK:
		;
	}

	ext3_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext3_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);

	ext3_journal_stop(handle);
	return;
out_notrans:
	/*
	 * Delete the inode from orphan list so that it doesn't stay there
	 * forever and trigger assertion on umount.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);
}

/*
 * Map an inode number to the filesystem block holding its on-disk copy,
 * filling in @iloc's group and intra-block offset.  Returns 0 on a bad
 * inode number or unreadable group descriptor.
 */
static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
		unsigned long ino, struct ext3_iloc *iloc)
{
	unsigned long block_group;
	unsigned long offset;
	ext3_fsblk_t block;
	struct ext3_group_desc *gdp;

	if (!ext3_valid_inum(sb, ino)) {
		/*
		 * This error is already checked for in namei.c unless we are
		 * looking at an NFS filehandle, in which case no error
		 * report is needed
		 */
		return 0;
	}

	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	gdp = ext3_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		return 0;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
		EXT3_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT3_BLOCK_SIZE_BITS(sb));

	iloc->block_group = block_group;
	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
	return block;
}

/*
 * ext3_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext3_get_inode_loc(struct inode *inode,
				struct ext3_iloc *iloc, int in_mem)
{
	ext3_fsblk_t block;
	struct buffer_head *bh;

	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
				"unable to read inode block - "
				"inode=%lu, block="E3FSBLK,
				 inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext3_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			int block_group;
			int start;

			block_group = (inode->i_ino - 1) /
					EXT3_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT3_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT3_INODES_PER_GROUP(inode->i_sb));
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext3_get_group_desc(inode->i_sb,
						block_group, NULL);
			if (!desc)
				goto make_io;

			bitmap_bh = sb_getblk(inode->i_sb,
					le32_to_cpu(desc->bg_inode_bitmap));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			/* Scan the other inode slots sharing this buffer;
			 * if any is in use we must read from disk. */
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext3_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext3_error(inode->i_sb, "ext3_get_inode_loc",
					"unable to read inode block - "
					"inode=%lu, block="E3FSBLK,
					inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here.
*/ return __ext3_get_inode_loc(inode, iloc, !ext3_test_inode_state(inode, EXT3_STATE_XATTR)); } void ext3_set_inode_flags(struct inode *inode) { unsigned int flags = EXT3_I(inode)->i_flags; inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); if (flags & EXT3_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT3_APPEND_FL) inode->i_flags |= S_APPEND; if (flags & EXT3_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; if (flags & EXT3_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT3_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; } /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */ void ext3_get_inode_flags(struct ext3_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL| EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT3_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT3_APPEND_FL; if (flags & S_IMMUTABLE) ei->i_flags |= EXT3_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT3_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT3_DIRSYNC_FL; } struct inode *ext3_iget(struct super_block *sb, unsigned long ino) { struct ext3_iloc iloc; struct ext3_inode *raw_inode; struct ext3_inode_info *ei; struct buffer_head *bh; struct inode *inode; journal_t *journal = EXT3_SB(sb)->s_journal; transaction_t *transaction; long ret; int block; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = EXT3_I(inode); ei->i_block_alloc_info = NULL; ret = __ext3_get_inode_loc(inode, &iloc, 0); if (ret < 0) goto bad_inode; bh = iloc.bh; raw_inode = ext3_raw_inode(&iloc); inode->i_mode = le16_to_cpu(raw_inode->i_mode); inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if(!(test_opt (inode->i_sb, NO_UID32))) { inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } inode->i_nlink = 
le16_to_cpu(raw_inode->i_links_count); inode->i_size = le32_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime); inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; ei->i_state_flags = 0; ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. * This is needed because nfsd might try to access dead inodes * the test is that same one that e2fsck uses * NeilBrown 1999oct15 */ if (inode->i_nlink == 0) { if (inode->i_mode == 0 || !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { /* this inode is deleted */ brelse (bh); ret = -ESTALE; goto bad_inode; } /* The only unlinked inodes we let through here have * valid i_mode and are being read by the orphan * recovery code: that's fine, we're about to complete * the process of deleting those. */ } inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); ei->i_flags = le32_to_cpu(raw_inode->i_flags); #ifdef EXT3_FRAGMENTS ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); ei->i_frag_no = raw_inode->i_frag; ei->i_frag_size = raw_inode->i_fsize; #endif ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); if (!S_ISREG(inode->i_mode)) { ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl); } else { inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; } ei->i_disksize = inode->i_size; inode->i_generation = le32_to_cpu(raw_inode->i_generation); ei->i_block_group = iloc.block_group; /* * NOTE! The in-memory inode i_data array is in little-endian order * even on big-endian machines: we do NOT byteswap the block numbers! */ for (block = 0; block < EXT3_N_BLOCKS; block++) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); /* * Set transaction id's of transactions that have to be committed * to finish f[data]sync. 
We set them to currently running transaction * as we cannot be sure that the inode or some of its metadata isn't * part of the transaction - the inode could have been reclaimed and * now it is reread from disk. */ if (journal) { tid_t tid; spin_lock(&journal->j_state_lock); if (journal->j_running_transaction) transaction = journal->j_running_transaction; else transaction = journal->j_committing_transaction; if (transaction) tid = transaction->t_tid; else tid = journal->j_commit_sequence; spin_unlock(&journal->j_state_lock); atomic_set(&ei->i_sync_tid, tid); atomic_set(&ei->i_datasync_tid, tid); } if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 && EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) { /* * When mke2fs creates big inodes it does not zero out * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE, * so ignore those first few inodes. */ ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > EXT3_INODE_SIZE(inode->i_sb)) { brelse (bh); ret = -EIO; goto bad_inode; } if (ei->i_extra_isize == 0) { /* The extra space is currently unused. Use it. 
*/ ei->i_extra_isize = sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE; } else { __le32 *magic = (void *)raw_inode + EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC)) ext3_set_inode_state(inode, EXT3_STATE_XATTR); } } else ei->i_extra_isize = 0; if (S_ISREG(inode->i_mode)) { inode->i_op = &ext3_file_inode_operations; inode->i_fop = &ext3_file_operations; ext3_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext3_dir_inode_operations; inode->i_fop = &ext3_dir_operations; } else if (S_ISLNK(inode->i_mode)) { if (ext3_inode_is_fast_symlink(inode)) { inode->i_op = &ext3_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); } else { inode->i_op = &ext3_symlink_inode_operations; ext3_set_aops(inode); } } else { inode->i_op = &ext3_special_inode_operations; if (raw_inode->i_block[0]) init_special_inode(inode, inode->i_mode, old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); else init_special_inode(inode, inode->i_mode, new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } brelse (iloc.bh); ext3_set_inode_flags(inode); unlock_new_inode(inode); return inode; bad_inode: iget_failed(inode); return ERR_PTR(ret); } /* * Post the struct inode info into an on-disk inode location in the * buffer-cache. This gobbles the caller's reference to the * buffer_head in the inode location struct. * * The caller must have write access to iloc->bh. */ static int ext3_do_update_inode(handle_t *handle, struct inode *inode, struct ext3_iloc *iloc) { struct ext3_inode *raw_inode = ext3_raw_inode(iloc); struct ext3_inode_info *ei = EXT3_I(inode); struct buffer_head *bh = iloc->bh; int err = 0, rc, block; again: /* we can't allow multiple procs in here at once, its a bit racey */ lock_buffer(bh); /* For fields not not tracking in the in-memory inode, * initialise them to zero for new inodes. 
*/ if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); ext3_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); if(!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); /* * Fix up interoperability with old kernels. Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if(!ei->i_dtime) { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(inode->i_uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(inode->i_gid)); } else { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(inode->i_uid)); raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(inode->i_gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le32(ei->i_disksize); raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags); #ifdef EXT3_FRAGMENTS raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); raw_inode->i_frag = ei->i_frag_no; raw_inode->i_fsize = ei->i_frag_size; #endif raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl); if (!S_ISREG(inode->i_mode)) { raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); } else { raw_inode->i_size_high = cpu_to_le32(ei->i_disksize >> 32); if (ei->i_disksize > 0x7fffffffULL) { struct super_block *sb = inode->i_sb; if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT3_FEATURE_RO_COMPAT_LARGE_FILE) || EXT3_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT3_GOOD_OLD_REV)) { /* If this is the first large file * created, add a flag to the superblock. 
*/ unlock_buffer(bh); err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh); if (err) goto out_brelse; ext3_update_dynamic_rev(sb); EXT3_SET_RO_COMPAT_FEATURE(sb, EXT3_FEATURE_RO_COMPAT_LARGE_FILE); handle->h_sync = 1; err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); /* get our lock and start over */ goto again; } } } raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { raw_inode->i_block[0] = cpu_to_le32(old_encode_dev(inode->i_rdev)); raw_inode->i_block[1] = 0; } else { raw_inode->i_block[0] = 0; raw_inode->i_block[1] = cpu_to_le32(new_encode_dev(inode->i_rdev)); raw_inode->i_block[2] = 0; } } else for (block = 0; block < EXT3_N_BLOCKS; block++) raw_inode->i_block[block] = ei->i_data[block]; if (ei->i_extra_isize) raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); unlock_buffer(bh); rc = ext3_journal_dirty_metadata(handle, bh); if (!err) err = rc; ext3_clear_inode_state(inode, EXT3_STATE_NEW); atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid); out_brelse: brelse (bh); ext3_std_error(inode->i_sb, err); return err; } /* * ext3_write_inode() * * We are called from a few places: * * - Within generic_file_write() for O_SYNC files. * Here, there will be no transaction running. We wait for any running * trasnaction to commit. * * - Within sys_sync(), kupdate and such. * We wait on commit, if tol to. * * - Within prune_icache() (PF_MEMALLOC == true) * Here we simply return. We can't afford to block kswapd on the * journal commit. * * In all cases it is actually safe for us to return without doing anything, * because the inode has been copied into a raw inode buffer in * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for * knfsd. 
* * Note that we are absolutely dependent upon all inode dirtiers doing the * right thing: they *must* call mark_inode_dirty() after dirtying info in * which we are interested. * * It would be a bug for them to not do this. The code: * * mark_inode_dirty(inode) * stuff(); * inode->i_size = expr; * * is in error because a kswapd-driven write_inode() could occur while * `stuff()' is running, and the new i_size will be lost. Plus the inode * will no longer be on the superblock's dirty inode list. */ int ext3_write_inode(struct inode *inode, struct writeback_control *wbc) { if (current->flags & PF_MEMALLOC) return 0; if (ext3_journal_current_handle()) { jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); dump_stack(); return -EIO; } if (wbc->sync_mode != WB_SYNC_ALL) return 0; return ext3_force_commit(inode->i_sb); } /* * ext3_setattr() * * Called from notify_change. * * We want to trap VFS attempts to truncate the file as soon as * possible. In particular, we want to make sure that when the VFS * shrinks i_size, we put the inode on the orphan list and modify * i_disksize immediately, so that during the subsequent flushing of * dirty pages and freeing of disk blocks, we can guarantee that any * commit will leave the blocks being flushed in an unused state on * disk. (On recovery, the inode will get truncated and the blocks will * be freed, so we have a strong guarantee that no future commit will * leave these blocks visible to the user.) * * Called with inode->sem down. */ int ext3_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error, rc = 0; const unsigned int ia_valid = attr->ia_valid; error = inode_change_ok(inode, attr); if (error) return error; if (is_quota_modification(inode, attr)) dquot_initialize(inode); if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? 
- but truncate inode update has it) */ handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } error = dquot_transfer(inode, attr); if (error) { ext3_journal_stop(handle); return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; error = ext3_mark_inode_dirty(handle, inode); ext3_journal_stop(handle); } if (S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { handle_t *handle; handle = ext3_journal_start(inode, 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } error = ext3_orphan_add(handle, inode); EXT3_I(inode)->i_disksize = attr->ia_size; rc = ext3_mark_inode_dirty(handle, inode); if (!error) error = rc; ext3_journal_stop(handle); } rc = inode_setattr(inode, attr); if (!rc && (ia_valid & ATTR_MODE)) rc = ext3_acl_chmod(inode); err_out: ext3_std_error(inode->i_sb, error); if (!error) error = rc; return error; } /* * How many blocks doth make a writepage()? * * With N blocks per page, it may be: * N data blocks * 2 indirect block * 2 dindirect * 1 tindirect * N+5 bitmap blocks (from the above) * N+5 group descriptor summary blocks * 1 inode block * 1 superblock. * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quote files * * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS * * With ordered or writeback data it's the same, less the N data blocks. * * If the inode's direct blocks can hold an integral number of pages then a * page cannot straddle two indirect blocks, and we can only touch one indirect * and dindirect block, and the "5" above becomes "3". * * This still overestimates under most circumstances. 
If we were to pass the * start and end offsets in here as well we could do block_to_path() on each * block and work out the exact number of indirects which are touched. Pah. */ static int ext3_writepage_trans_blocks(struct inode *inode) { int bpp = ext3_journal_blocks_per_page(inode); int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3; int ret; if (ext3_should_journal_data(inode)) ret = 3 * (bpp + indirects) + 2; else ret = 2 * (bpp + indirects) + 2; #ifdef CONFIG_QUOTA /* We know that structure was already allocated during dquot_initialize so * we will be updating only the data blocks + inodes */ ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); #endif return ret; } /* * The caller must have previously called ext3_reserve_inode_write(). * Give this, we know that the caller already has write access to iloc->bh. */ int ext3_mark_iloc_dirty(handle_t *handle, struct inode *inode, struct ext3_iloc *iloc) { int err = 0; /* the do_update_inode consumes one bh->b_count */ get_bh(iloc->bh); /* ext3_do_update_inode() does journal_dirty_metadata */ err = ext3_do_update_inode(handle, inode, iloc); put_bh(iloc->bh); return err; } /* * On success, We end up with an outstanding reference count against * iloc->bh. This _must_ be cleaned up later. */ int ext3_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext3_iloc *iloc) { int err = 0; if (handle) { err = ext3_get_inode_loc(inode, iloc); if (!err) { BUFFER_TRACE(iloc->bh, "get_write_access"); err = ext3_journal_get_write_access(handle, iloc->bh); if (err) { brelse(iloc->bh); iloc->bh = NULL; } } } ext3_std_error(inode->i_sb, err); return err; } /* * What we do here is to mark the in-core inode as clean with respect to inode * dirtiness (it may still be data-dirty). * This means that the in-core inode may be reaped by prune_icache * without having to perform any I/O. This is a very good thing, * because *any* task may call prune_icache - even ones which * have a transaction open against a different journal. 
* * Is this cheating? Not really. Sure, we haven't written the * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. * * Is this efficient/effective? Well, we're being nice to the system * by cleaning up our inodes proactively so they can be reaped * without I/O. But we are potentially leaving up to five seconds' * worth of inodes floating about which prune_icache wants us to * write out. One way to fix that would be to get prune_icache() * to do a write_super() to free up some memory. It has the desired * effect. */ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) { struct ext3_iloc iloc; int err; might_sleep(); err = ext3_reserve_inode_write(handle, inode, &iloc); if (!err) err = ext3_mark_iloc_dirty(handle, inode, &iloc); return err; } /* * ext3_dirty_inode() is called from __mark_inode_dirty() * * We're really interested in the case where a file is being extended. * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * * Also, dquot_alloc_space() will always dirty the inode when blocks * are allocated to the file. * * If the inode is marked synchronous, we don't honour that here - doing * so would cause a commit on atime updates, which we don't bother doing. * We handle synchronous inodes at the highest possible level. */ void ext3_dirty_inode(struct inode *inode) { handle_t *current_handle = ext3_journal_current_handle(); handle_t *handle; handle = ext3_journal_start(inode, 2); if (IS_ERR(handle)) goto out; if (current_handle && current_handle->h_transaction != handle->h_transaction) { /* This task has a transaction open against a different fs */ printk(KERN_EMERG "%s: transactions do not match!\n", __func__); } else { jbd_debug(5, "marking dirty. 
outer handle=%p\n", current_handle); ext3_mark_inode_dirty(handle, inode); } ext3_journal_stop(handle); out: return; } #if 0 /* * Bind an inode's backing buffer_head into this transaction, to prevent * it from being flushed to disk early. Unlike * ext3_reserve_inode_write, this leaves behind no bh reference and * returns no iloc structure, so the caller needs to repeat the iloc * lookup to mark the inode dirty later. */ static int ext3_pin_inode(handle_t *handle, struct inode *inode) { struct ext3_iloc iloc; int err = 0; if (handle) { err = ext3_get_inode_loc(inode, &iloc); if (!err) { BUFFER_TRACE(iloc.bh, "get_write_access"); err = journal_get_write_access(handle, iloc.bh); if (!err) err = ext3_journal_dirty_metadata(handle, iloc.bh); brelse(iloc.bh); } } ext3_std_error(inode->i_sb, err); return err; } #endif int ext3_change_inode_journal_flag(struct inode *inode, int val) { journal_t *journal; handle_t *handle; int err; /* * We have to be very careful here: changing a data block's * journaling status dynamically is dangerous. If we write a * data block to the journal, change the status and then delete * that block, we risk forgetting to revoke the old log record * from the journal and so a subsequent replay can corrupt data. * So, first we make sure that the journal is empty and that * nobody is changing anything. */ journal = EXT3_JOURNAL(inode); if (is_journal_aborted(journal)) return -EROFS; journal_lock_updates(journal); journal_flush(journal); /* * OK, there are no updates running now, and all cached data is * synced to disk. We are now in a completely consistent state * which doesn't have anything in the journal, and we know that * no filesystem updates are running, so it is safe to modify * the inode's in-core data-journaling state flag now. */ if (val) EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL; else EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL; ext3_set_aops(inode); journal_unlock_updates(journal); /* Finally we can mark the inode as dirty. 
*/ handle = ext3_journal_start(inode, 1); if (IS_ERR(handle)) return PTR_ERR(handle); err = ext3_mark_inode_dirty(handle, inode); handle->h_sync = 1; ext3_journal_stop(handle); ext3_std_error(inode->i_sb, err); return err; }
gpl-2.0
sjp38/linux.rpi
arch/mips/cavium-octeon/csrc-octeon.c
1014
4529
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2007 by Ralf Baechle * Copyright (C) 2009, 2012 Cavium, Inc. */ #include <linux/clocksource.h> #include <linux/export.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/cpu-info.h> #include <asm/cpu-type.h> #include <asm/time.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-ipd-defs.h> #include <asm/octeon/cvmx-mio-defs.h> static u64 f; static u64 rdiv; static u64 sdiv; static u64 octeon_udelay_factor; static u64 octeon_ndelay_factor; void __init octeon_setup_delays(void) { octeon_udelay_factor = octeon_get_clock_rate() / 1000000; /* * For __ndelay we divide by 2^16, so the factor is multiplied * by the same amount. */ octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull; preset_lpj = octeon_get_clock_rate() / HZ; if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { union cvmx_mio_rst_boot rst_boot; rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); rdiv = rst_boot.s.c_mul; /* CPU clock */ sdiv = rst_boot.s.pnr_mul; /* I/O clock */ f = (0x8000000000000000ull / sdiv) * 2; } } /* * Set the current core's cvmcount counter to the value of the * IPD_CLK_COUNT. We do this on all cores as they are brought * on-line. This allows for a read from a local cpu register to * access a synchronized counter. * * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv. */ void octeon_init_cvmcount(void) { unsigned long flags; unsigned loops = 2; /* Clobber loops so GCC will not unroll the following while loop. */ asm("" : "+r" (loops)); local_irq_save(flags); /* * Loop several times so we are executing from the cache, * which should give more deterministic timing. 
*/ while (loops--) { u64 ipd_clk_count = cvmx_read_csr(CVMX_IPD_CLK_COUNT); if (rdiv != 0) { ipd_clk_count *= rdiv; if (f != 0) { asm("dmultu\t%[cnt],%[f]\n\t" "mfhi\t%[cnt]" : [cnt] "+r" (ipd_clk_count) : [f] "r" (f) : "hi", "lo"); } } write_c0_cvmcount(ipd_clk_count); } local_irq_restore(flags); } static cycle_t octeon_cvmcount_read(struct clocksource *cs) { return read_c0_cvmcount(); } static struct clocksource clocksource_mips = { .name = "OCTEON_CVMCOUNT", .read = octeon_cvmcount_read, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; unsigned long long notrace sched_clock(void) { /* 64-bit arithmatic can overflow, so use 128-bit. */ u64 t1, t2, t3; unsigned long long rv; u64 mult = clocksource_mips.mult; u64 shift = clocksource_mips.shift; u64 cnt = read_c0_cvmcount(); asm ( "dmultu\t%[cnt],%[mult]\n\t" "nor\t%[t1],$0,%[shift]\n\t" "mfhi\t%[t2]\n\t" "mflo\t%[t3]\n\t" "dsll\t%[t2],%[t2],1\n\t" "dsrlv\t%[rv],%[t3],%[shift]\n\t" "dsllv\t%[t1],%[t2],%[t1]\n\t" "or\t%[rv],%[t1],%[rv]\n\t" : [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3) : [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift) : "hi", "lo"); return rv; } void __init plat_time_init(void) { clocksource_mips.rating = 300; clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate()); } void __udelay(unsigned long us) { u64 cur, end, inc; cur = read_c0_cvmcount(); inc = us * octeon_udelay_factor; end = cur + inc; while (end > cur) cur = read_c0_cvmcount(); } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long ns) { u64 cur, end, inc; cur = read_c0_cvmcount(); inc = ((ns * octeon_ndelay_factor) >> 16); end = cur + inc; while (end > cur) cur = read_c0_cvmcount(); } EXPORT_SYMBOL(__ndelay); void __delay(unsigned long loops) { u64 cur, end; cur = read_c0_cvmcount(); end = cur + loops; while (end > cur) cur = read_c0_cvmcount(); } EXPORT_SYMBOL(__delay); /** * octeon_io_clk_delay - wait for a given number of io clock cycles to pass. 
* * We scale the wait by the clock ratio, and then wait for the * corresponding number of core clocks. * * @count: The number of clocks to wait. */ void octeon_io_clk_delay(unsigned long count) { u64 cur, end; cur = read_c0_cvmcount(); if (rdiv != 0) { end = count * rdiv; if (f != 0) { asm("dmultu\t%[cnt],%[f]\n\t" "mfhi\t%[cnt]" : [cnt] "+r" (end) : [f] "r" (f) : "hi", "lo"); } end = cur + end; } else { end = cur + count; } while (end > cur) cur = read_c0_cvmcount(); } EXPORT_SYMBOL(octeon_io_clk_delay);
gpl-2.0
hisilicon/linux-x5hd2
fs/ceph/caps.c
1526
86447
#include <linux/ceph/ceph_debug.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/writeback.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/decode.h> #include <linux/ceph/messenger.h> /* * Capability management * * The Ceph metadata servers control client access to inode metadata * and file data by issuing capabilities, granting clients permission * to read and/or write both inode field and file data to OSDs * (storage nodes). Each capability consists of a set of bits * indicating which operations are allowed. * * If the client holds a *_SHARED cap, the client has a coherent value * that can be safely read from the cached inode. * * In the case of a *_EXCL (exclusive) or FILE_WR capabilities, the * client is allowed to change inode attributes (e.g., file size, * mtime), note its dirty state in the ceph_cap, and asynchronously * flush that metadata change to the MDS. * * In the event of a conflicting operation (perhaps by another * client), the MDS will revoke the conflicting client capabilities. * * In order for a client to cache an inode, it must hold a capability * with at least one MDS server. When inodes are released, release * notifications are batched and periodically sent en masse to the MDS * cluster to release server state. */ /* * Generate readable cap strings for debugging output. 
*/ #define MAX_CAP_STR 20 static char cap_str[MAX_CAP_STR][40]; static DEFINE_SPINLOCK(cap_str_lock); static int last_cap_str; static char *gcap_string(char *s, int c) { if (c & CEPH_CAP_GSHARED) *s++ = 's'; if (c & CEPH_CAP_GEXCL) *s++ = 'x'; if (c & CEPH_CAP_GCACHE) *s++ = 'c'; if (c & CEPH_CAP_GRD) *s++ = 'r'; if (c & CEPH_CAP_GWR) *s++ = 'w'; if (c & CEPH_CAP_GBUFFER) *s++ = 'b'; if (c & CEPH_CAP_GLAZYIO) *s++ = 'l'; return s; } const char *ceph_cap_string(int caps) { int i; char *s; int c; spin_lock(&cap_str_lock); i = last_cap_str++; if (last_cap_str == MAX_CAP_STR) last_cap_str = 0; spin_unlock(&cap_str_lock); s = cap_str[i]; if (caps & CEPH_CAP_PIN) *s++ = 'p'; c = (caps >> CEPH_CAP_SAUTH) & 3; if (c) { *s++ = 'A'; s = gcap_string(s, c); } c = (caps >> CEPH_CAP_SLINK) & 3; if (c) { *s++ = 'L'; s = gcap_string(s, c); } c = (caps >> CEPH_CAP_SXATTR) & 3; if (c) { *s++ = 'X'; s = gcap_string(s, c); } c = caps >> CEPH_CAP_SFILE; if (c) { *s++ = 'F'; s = gcap_string(s, c); } if (s == cap_str[i]) *s++ = '-'; *s = 0; return cap_str[i]; } void ceph_caps_init(struct ceph_mds_client *mdsc) { INIT_LIST_HEAD(&mdsc->caps_list); spin_lock_init(&mdsc->caps_list_lock); } void ceph_caps_finalize(struct ceph_mds_client *mdsc) { struct ceph_cap *cap; spin_lock(&mdsc->caps_list_lock); while (!list_empty(&mdsc->caps_list)) { cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item); list_del(&cap->caps_item); kmem_cache_free(ceph_cap_cachep, cap); } mdsc->caps_total_count = 0; mdsc->caps_avail_count = 0; mdsc->caps_use_count = 0; mdsc->caps_reserve_count = 0; mdsc->caps_min_count = 0; spin_unlock(&mdsc->caps_list_lock); } void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta) { spin_lock(&mdsc->caps_list_lock); mdsc->caps_min_count += delta; BUG_ON(mdsc->caps_min_count < 0); spin_unlock(&mdsc->caps_list_lock); } int ceph_reserve_caps(struct ceph_mds_client *mdsc, struct ceph_cap_reservation *ctx, int need) { int i; struct ceph_cap *cap; int have; int 
alloc = 0; LIST_HEAD(newcaps); int ret = 0; dout("reserve caps ctx=%p need=%d\n", ctx, need); /* first reserve any caps that are already allocated */ spin_lock(&mdsc->caps_list_lock); if (mdsc->caps_avail_count >= need) have = need; else have = mdsc->caps_avail_count; mdsc->caps_avail_count -= have; mdsc->caps_reserve_count += have; BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + mdsc->caps_reserve_count + mdsc->caps_avail_count); spin_unlock(&mdsc->caps_list_lock); for (i = have; i < need; i++) { cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); if (!cap) { ret = -ENOMEM; goto out_alloc_count; } list_add(&cap->caps_item, &newcaps); alloc++; } BUG_ON(have + alloc != need); spin_lock(&mdsc->caps_list_lock); mdsc->caps_total_count += alloc; mdsc->caps_reserve_count += alloc; list_splice(&newcaps, &mdsc->caps_list); BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + mdsc->caps_reserve_count + mdsc->caps_avail_count); spin_unlock(&mdsc->caps_list_lock); ctx->count = need; dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n", ctx, mdsc->caps_total_count, mdsc->caps_use_count, mdsc->caps_reserve_count, mdsc->caps_avail_count); return 0; out_alloc_count: /* we didn't manage to reserve as much as we needed */ pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n", ctx, need, have); return ret; } int ceph_unreserve_caps(struct ceph_mds_client *mdsc, struct ceph_cap_reservation *ctx) { dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count); if (ctx->count) { spin_lock(&mdsc->caps_list_lock); BUG_ON(mdsc->caps_reserve_count < ctx->count); mdsc->caps_reserve_count -= ctx->count; mdsc->caps_avail_count += ctx->count; ctx->count = 0; dout("unreserve caps %d = %d used + %d resv + %d avail\n", mdsc->caps_total_count, mdsc->caps_use_count, mdsc->caps_reserve_count, mdsc->caps_avail_count); BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + mdsc->caps_reserve_count + mdsc->caps_avail_count); spin_unlock(&mdsc->caps_list_lock); } return 0; } static 
struct ceph_cap *get_cap(struct ceph_mds_client *mdsc, struct ceph_cap_reservation *ctx) { struct ceph_cap *cap = NULL; /* temporary, until we do something about cap import/export */ if (!ctx) { cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); if (cap) { mdsc->caps_use_count++; mdsc->caps_total_count++; } return cap; } spin_lock(&mdsc->caps_list_lock); dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count, mdsc->caps_reserve_count, mdsc->caps_avail_count); BUG_ON(!ctx->count); BUG_ON(ctx->count > mdsc->caps_reserve_count); BUG_ON(list_empty(&mdsc->caps_list)); ctx->count--; mdsc->caps_reserve_count--; mdsc->caps_use_count++; cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item); list_del(&cap->caps_item); BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + mdsc->caps_reserve_count + mdsc->caps_avail_count); spin_unlock(&mdsc->caps_list_lock); return cap; } void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap) { spin_lock(&mdsc->caps_list_lock); dout("put_cap %p %d = %d used + %d resv + %d avail\n", cap, mdsc->caps_total_count, mdsc->caps_use_count, mdsc->caps_reserve_count, mdsc->caps_avail_count); mdsc->caps_use_count--; /* * Keep some preallocated caps around (ceph_min_count), to * avoid lots of free/alloc churn. 
*/ if (mdsc->caps_avail_count >= mdsc->caps_reserve_count + mdsc->caps_min_count) { mdsc->caps_total_count--; kmem_cache_free(ceph_cap_cachep, cap); } else { mdsc->caps_avail_count++; list_add(&cap->caps_item, &mdsc->caps_list); } BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + mdsc->caps_reserve_count + mdsc->caps_avail_count); spin_unlock(&mdsc->caps_list_lock); } void ceph_reservation_status(struct ceph_fs_client *fsc, int *total, int *avail, int *used, int *reserved, int *min) { struct ceph_mds_client *mdsc = fsc->mdsc; if (total) *total = mdsc->caps_total_count; if (avail) *avail = mdsc->caps_avail_count; if (used) *used = mdsc->caps_use_count; if (reserved) *reserved = mdsc->caps_reserve_count; if (min) *min = mdsc->caps_min_count; } /* * Find ceph_cap for given mds, if any. * * Called with i_ceph_lock held. */ static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) { struct ceph_cap *cap; struct rb_node *n = ci->i_caps.rb_node; while (n) { cap = rb_entry(n, struct ceph_cap, ci_node); if (mds < cap->mds) n = n->rb_left; else if (mds > cap->mds) n = n->rb_right; else return cap; } return NULL; } struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds) { struct ceph_cap *cap; spin_lock(&ci->i_ceph_lock); cap = __get_cap_for_mds(ci, mds); spin_unlock(&ci->i_ceph_lock); return cap; } /* * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1. 
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

/* locked wrapper around __ceph_get_cap_mds() */
int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;
	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode));
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}

/*
 * Insert @new into the per-inode cap rbtree (keyed by mds id).
 * BUGs if a cap for the same mds already exists.
 *
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	/* both timeouts derive from the mount-time wanted_delay options */
	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			/* an I_FLUSH inode stays where it is (the front) */
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	/* unlocked peek is fine: a false non-empty just takes the lock */
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
		ci->i_rdcache_gen++;

	/*
	 * if we are newly issued FILE_SHARED, clear D_COMPLETE; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode))
			ceph_dir_clear_complete(&ci->vfs_inode);
	}
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
		 struct ceph_mds_session *session, u64 cap_id,
		 int fmode, unsigned issued, unsigned wanted,
		 unsigned seq, unsigned mseq, u64 realmino, int flags,
		 struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *new_cap = NULL;
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (new_cap) {
			/* use the cap we allocated on a previous pass */
			cap = new_cap;
			new_cap = NULL;
		} else {
			/*
			 * must drop i_ceph_lock to allocate; retake it
			 * and re-check by retrying from the top
			 */
			spin_unlock(&ci->i_ceph_lock);
			new_cap = get_cap(mdsc, caps_reservation);
			if (new_cap == NULL)
				return -ENOMEM;
			goto retry;
		}

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* clear out old exporting info?  (i.e. on cap import) */
		if (ci->i_cap_exporting_mds == mds) {
			ci->i_cap_exporting_issued = 0;
			ci->i_cap_exporting_mseq = 0;
			ci->i_cap_exporting_mds = -1;
		}

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else if (new_cap)
		/* raced: cap appeared while we allocated; drop ours */
		ceph_put_cap(mdsc, new_cap);

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			ceph_get_snap_realm(mdsc, realm);
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH)
		ci->i_auth_cap = cap;
	else if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
	spin_unlock(&ci->i_ceph_lock);
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	/* snapshot the session's generation/ttl under its own lock */
	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	/* snap caps and mid-export caps count as issued too */
	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		/* someone is walking s_caps; don't reorder under them */
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	/* snap caps alone may already satisfy the mask */
	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		/* a single cap may satisfy the whole mask... */
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct rb_node *p;
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		/* implemented-but-not-issued bits are being revoked */
		if (__cap_is_valid(cap) &&
		    (cap->implemented & ~cap->issued & mask)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

/* cap bits currently in active use, derived from the inode's refcounts */
int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int want = 0;
	int mode;
	for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
		if (ci->i_nr_by_mode[mode])
			want |= ceph_caps_for_mode(mode);
	return want;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		mds_wanted |= cap->mds_wanted;
	}
	return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	/* a cap being exported to another mds still counts */
	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;
	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	/* if the iterator holds the cap, it frees it when done */
	if (removed)
		ceph_put_cap(mdsc, cap);

	/* last cap gone: drop this inode's snap realm membership */
	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
		struct ceph_snap_realm *realm = ci->i_snap_realm;
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		ci->i_snap_realm_counter++;
		ci->i_snap_realm = NULL;
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}
	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
			u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			u64 time_warp_seq,
			uid_t uid, gid_t gid, umode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.tid = cpu_to_le64(flush_tid);

	/* fill in the wire structure; all fields little-endian */
	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(uid);
	fc->gid = cpu_to_le32(gid);
	fc->mode = cpu_to_le32(mode);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		/* xattrs ride in the message middle */
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Append one release record to the session's current (partially
 * filled) cap-release message; when the message is full, move it to
 * the "done" list so it gets sent.
 */
static void __queue_cap_release(struct ceph_mds_session *session,
				u64 ino, u64 cap_id, u32 migrate_seq,
				u32 issue_seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;

	spin_lock(&session->s_cap_lock);
	BUG_ON(!session->s_num_cap_releases);
	msg = list_first_entry(&session->s_cap_releases,
			       struct ceph_msg, list_head);

	dout(" adding %llx release to mds%d msg %p (%d left)\n",
	     ino, session->s_mds, msg, session->s_num_cap_releases);

	BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
	head = msg->front.iov_base;
	head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
	item = msg->front.iov_base + msg->front.iov_len;
	item->ino = cpu_to_le64(ino);
	item->cap_id = cpu_to_le64(cap_id);
	item->migrate_seq = cpu_to_le32(migrate_seq);
	item->seq = cpu_to_le32(issue_seq);

	session->s_num_cap_releases--;

	msg->front.iov_len += sizeof(*item);
	if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
		dout(" release msg %p full\n", msg);
		list_move_tail(&msg->list_head, &session->s_cap_releases_done);
	} else {
		dout(" release msg %p at %d/%d (%d)\n", msg,
		     (int)le32_to_cpu(head->num),
		     (int)CEPH_CAPS_PER_RELEASE,
		     (int)msg->front.iov_len);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		struct ceph_mds_session *session = cap->session;

		__queue_cap_release(session, ceph_ino(inode), cap->cap_id,
				    cap->mseq, cap->issue_seq);
		/* advance before __ceph_remove_cap() erases the node */
		p = rb_next(p);
		__ceph_remove_cap(cap);
	}
}

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.
Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      unsigned *pflush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 seq, issue_seq, mseq, time_warp_seq, follows;
	u64 size, max_size;
	struct timespec mtime, atime;
	int wake = 0;
	umode_t mode;
	uid_t uid;
	gid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	struct ceph_buffer *xattr_blob = NULL;
	int delayed = 0;
	u64 flush_tid = 0;
	int i;
	int ret;

	/* never retain bits the mds is in the middle of revoking */
	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	if (flushing) {
		/*
		 * assign a tid for flush operations so we can avoid
		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
		 * clean type races.  track latest tid for every bit
		 * so we can handle flush AxFw, flush Fw, and have the
		 * first ack clean Ax.
		 */
		flush_tid = ++ci->i_cap_flush_last_tid;
		if (pflush_tid)
			*pflush_tid = flush_tid;
		dout(" cap_flush_tid %d\n", (int)flush_tid);
		for (i = 0; i < CEPH_CAP_BITS; i++)
			if (flushing & (1 << i))
				ci->i_cap_flush_tid[i] = flush_tid;

		follows = ci->i_head_snapc->seq;
	} else {
		follows = 0;
	}

	/* snapshot the inode state to send while we still hold the lock */
	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	time_warp_seq = ci->i_time_warp_seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_blob = ci->i_xattrs.blob;
		xattr_version = ci->i_xattrs.version;
	}

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, time_warp_seq,
		uid, gid, mode, xattr_version, xattr_blob, follows);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Unless @again is true, skip cap_snaps that were already sent to
 * the MDS (i.e., during this session).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
			struct ceph_mds_session **psession,
			int again)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	int mds;
	struct ceph_cap_snap *capsnap;
	u32 mseq;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
						    session->s_mutex */
	u64 next_follows = 0;  /* keep track of how far we've gotten through the
			     i_cap_snaps list, and skip these entries next time
			     around to avoid an infinite loop */

	if (psession)
		session = *psession;

	dout("__flush_snaps %p\n", inode);
retry:
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/* avoid an infinite loop after retry */
		if (capsnap->follows < next_follows)
			continue;
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/*
		 * if cap writeback already occurred, we should have dropped
		 * the capsnap in ceph_put_wrbuffer_cap_refs.
		 */
		BUG_ON(capsnap->dirty == 0);

		/* pick mds, take s_mutex */
		if (ci->i_auth_cap == NULL) {
			dout("no auth cap (migrating?), doing nothing\n");
			goto out;
		}

		/* only flush each capsnap once */
		if (!again && !list_empty(&capsnap->flushing_item)) {
			dout("already flushed %p, skipping\n", capsnap);
			continue;
		}

		mds = ci->i_auth_cap->session->s_mds;
		mseq = ci->i_auth_cap->mseq;

		/* drop the wrong session's mutex before grabbing the right one */
		if (session && session->s_mds != mds) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			session = NULL;
		}
		if (!session) {
			/*
			 * lock ordering requires dropping i_ceph_lock to
			 * take s_mutex; retry the list walk afterwards
			 */
			spin_unlock(&ci->i_ceph_lock);
			mutex_lock(&mdsc->mutex);
			session = __ceph_lookup_mds_session(mdsc, mds);
			mutex_unlock(&mdsc->mutex);
			if (session) {
				dout("inverting session/ino locks on %p\n",
				     session);
				mutex_lock(&session->s_mutex);
			}
			/*
			 * if session == NULL, we raced against a cap
			 * deletion or migration.  retry, and we'll
			 * get a better @mds value next time.
			 */
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}

		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
		atomic_inc(&capsnap->nref);
		if (!list_empty(&capsnap->flushing_item))
			list_del_init(&capsnap->flushing_item);
		list_add_tail(&capsnap->flushing_item,
			      &session->s_cap_snaps_flushing);
		spin_unlock(&ci->i_ceph_lock);

		dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
		     inode, capsnap, capsnap->follows, capsnap->flush_tid);
		send_cap_msg(session, ceph_vino(inode).ino, 0,
			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
			     capsnap->size, 0,
			     &capsnap->mtime, &capsnap->atime,
			     capsnap->time_warp_seq,
			     capsnap->uid, capsnap->gid, capsnap->mode,
			     capsnap->xattr_version, capsnap->xattr_blob,
			     capsnap->follows);

		next_follows = capsnap->follows + 1;
		ceph_put_cap_snap(capsnap);

		/* we dropped i_ceph_lock; restart the list walk */
		spin_lock(&ci->i_ceph_lock);
		goto retry;
	}

	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);

out:
	/* either hand the held session back to the caller, or release it */
	if (psession)
		*psession = session;
	else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
}

/* locked wrapper around __ceph_flush_snaps() */
static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
	spin_lock(&ci->i_ceph_lock);
	__ceph_flush_snaps(ci, NULL, 0);
	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
*/ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) { struct ceph_mds_client *mdsc = ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; struct inode *inode = &ci->vfs_inode; int was = ci->i_dirty_caps; int dirty = 0; dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, ceph_cap_string(mask), ceph_cap_string(was), ceph_cap_string(was | mask)); ci->i_dirty_caps |= mask; if (was == 0) { if (!ci->i_head_snapc) ci->i_head_snapc = ceph_get_snap_context( ci->i_snap_realm->cached_context); dout(" inode %p now dirty snapc %p auth cap %p\n", &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); BUG_ON(!list_empty(&ci->i_dirty_item)); spin_lock(&mdsc->cap_dirty_lock); if (ci->i_auth_cap) list_add(&ci->i_dirty_item, &mdsc->cap_dirty); else list_add(&ci->i_dirty_item, &mdsc->cap_dirty_migrating); spin_unlock(&mdsc->cap_dirty_lock); if (ci->i_flushing_caps == 0) { ihold(inode); dirty |= I_DIRTY_SYNC; } } BUG_ON(list_empty(&ci->i_dirty_item)); if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && (mask & CEPH_CAP_FILE_BUFFER)) dirty |= I_DIRTY_DATASYNC; __cap_delay_requeue(mdsc, ci); return dirty; } /* * Add dirty inode to the flushing list. Assigned a seq number so we * can wait for caps to flush without starving. * * Called under i_ceph_lock. 
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));

	/* everything that was dirty is now flushing */
	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	/* the flush seq lets waiters wait without starving */
	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
		dout(" inode %p now flushing seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	} else {
		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		dout(" inode %p now flushing (more) seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	return flushing;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	/*
	 * must drop i_ceph_lock while invalidating; the saved
	 * generation tells us whether anyone re-cached pages meanwhile
	 */
	spin_unlock(&ci->i_ceph_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	int file_wanted, used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int tried_invalidate = 0;
	int delayed = 0, sent = 0, force_requeue = 0, num;
	int queue_invalidate = 0;
	int is_delayed = flags & CHECK_CAPS_NODELAY;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	/* flush snaps first time around only */
	if (!list_empty(&ci->i_cap_snaps))
		__ceph_flush_snaps(ci, &session, 0);
	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	/* recompute wanted/used/issued each pass; __send_cap and the lock
	 * inversion paths below drop i_ceph_lock, so state may change */
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	want = file_wanted | used;
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	retain = want | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (want) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto old our caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
	    inode->i_data.nrpages &&                 /* have cached pages */
	    (file_wanted == 0 ||                     /* no open files */
	     (revoking & (CEPH_CAP_FILE_CACHE|
			  CEPH_CAP_FILE_LAZYIO))) && /*  or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = 1;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = 1;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
		    ci->i_dirty_caps) {
			dout("flushing dirty caps\n");
			goto ack;
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		/* we need this cap's session s_mutex before sending;
		 * drop any other session's mutex we happen to hold */
		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				/* lock-order inversion: release the spinlock
				 * (and snap_rwsem), block on s_mutex, then
				 * restart the whole scan */
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}
		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
			flushing = __mark_caps_flushing(inode, session);
		else
			flushing = 0;

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
				      retain, flushing, NULL);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = 1;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);
	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}

/*
 * Try to flush dirty caps back to the auth mds.
 *
 * If @session is NULL, the auth cap's session mutex is taken (and
 * released before returning); if a session is passed in, it must be
 * the auth cap's session and its mutex is left held.
 *
 * Returns the set of caps that were marked flushing (0 if nothing was
 * flushed); *flush_tid is filled in by __send_cap.
 */
static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
			  unsigned *flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int unlock_session = session ? 0 : 1;
	int flushing = 0;

retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session) {
			/* can't take s_mutex under the spinlock; drop,
			 * lock, and re-validate from the top */
			spin_unlock(&ci->i_ceph_lock);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		BUG_ON(session != cap->session);
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
			goto out;

		flushing = __mark_caps_flushing(inode, session);

		/* __send_cap drops i_ceph_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
				     cap->issued | cap->implemented, flushing,
				     flush_tid);
		if (!delayed)
			goto out_unlocked;

		spin_lock(&ci->i_ceph_lock);
		__cap_delay_requeue(mdsc, ci);
	}
out:
	spin_unlock(&ci->i_ceph_lock);
out_unlocked:
	if (session && unlock_session)
		mutex_unlock(&session->s_mutex);
	return flushing;
}

/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, unsigned tid)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i, ret = 1;

	spin_lock(&ci->i_ceph_lock);
	for (i = 0; i < CEPH_CAP_BITS; i++)
		/* a bit still flushing with tid <= @tid means the flush
		 * we are waiting on has not been acked yet */
		if ((ci->i_flushing_caps & (1 << i)) &&
		    ci->i_cap_flush_tid[i] <= tid) {
			/* still flushing this bit */
			ret = 0;
			break;
		}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
static void sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */
	req = list_entry(head->prev, struct ceph_osd_request, r_unsafe_item);
	last_tid = req->r_tid;

	do {
		/* hold a ref across the unlocked wait so the request
		 * can't be freed under us */
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		spin_lock(&ci->i_unsafe_lock);
		ceph_osdc_put_request(req);

		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_entry(head->next, struct ceph_osd_request,
				 r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}

/*
 * fsync: wait for unsafe write replies, write back dirty pages in the
 * given range, then flush dirty caps and (unless datasync) wait for
 * the non-file metadata flush to be acked by the MDS.
 */
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
	sync_write_wait(inode);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret < 0)
		return ret;
	mutex_lock(&inode->i_mutex);

	dirty = try_flush_caps(inode, NULL, &flush_tid);
	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

	/*
	 * only wait on non-file metadata writeback (the mds
	 * can recover size and mtime, so we don't need to
	 * wait for that)
	 */
	if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
		dout("fsync waiting for flush_tid %u\n", flush_tid);
		ret = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	}

	dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int err = 0;
	int dirty;
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	dout("write_inode %p wait=%d\n", inode, wait);
	if (wait) {
		/* synchronous: flush now and wait for the ack */
		dirty = try_flush_caps(inode, NULL, &flush_tid);
		if (dirty)
			err = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	} else {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		/* async: just make sure a delayed flush is scheduled soon */
		spin_lock(&ci->i_ceph_lock);
		if (__ceph_caps_dirty(ci))
			__cap_delay_requeue_front(mdsc, ci);
		spin_unlock(&ci->i_ceph_lock);
	}
	return err;
}

/*
 * After a recovering MDS goes active, we need to resend any caps
 * we were flushing.
 *
 * Caller holds session->s_mutex.
 */
/*
 * Re-send any pending FLUSHSNAP messages for cap snaps whose auth cap
 * belongs to the (just re-established) @session.
 */
static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_cap_snap *capsnap;

	dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
	list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
			    flushing_item) {
		struct ceph_inode_info *ci = capsnap->ci;
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;

		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
			     cap, capsnap);
			__ceph_flush_snaps(ci, &session, 1);
		} else {
			/* auth cap moved while this was queued; nothing we
			 * can resend on this session */
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
		}
		spin_unlock(&ci->i_ceph_lock);
	}
}

/*
 * Re-send cap flushes (snaps first, then dirty-cap FLUSH messages) for
 * every inode on @session's s_cap_flushing list.  Used after an MDS
 * recovers; caller holds session->s_mutex (per comment above).
 */
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;

	kick_flushing_capsnaps(mdsc, session);

	dout("kick_flushing_caps mds%d\n", session->s_mds);
	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;
		int delayed = 0;

		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p %s\n", inode,
			     cap, ceph_cap_string(ci->i_flushing_caps));
			/* __send_cap drops i_ceph_lock */
			delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					     __ceph_caps_used(ci),
					     __ceph_caps_wanted(ci),
					     cap->issued | cap->implemented,
					     ci->i_flushing_caps, NULL);
			if (delayed) {
				spin_lock(&ci->i_ceph_lock);
				__cap_delay_requeue(mdsc, ci);
				spin_unlock(&ci->i_ceph_lock);
			}
		} else {
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
		}
	}
}

/*
 * Re-send pending snap and cap flushes for a single inode (used after
 * a cap IMPORT re-establishes our auth cap on @session).
 *
 * NOTE(review): takes i_ceph_lock and relies on __send_cap /
 * __ceph_flush_snaps to drop it on the paths where it is not
 * explicitly unlocked here.
 */
static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session,
				     struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int delayed = 0;

	spin_lock(&ci->i_ceph_lock);
	cap = ci->i_auth_cap;
	dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
	     ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
	__ceph_flush_snaps(ci, &session, 1);
	if (ci->i_flushing_caps) {
		/* __send_cap drops i_ceph_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
				     __ceph_caps_used(ci),
				     __ceph_caps_wanted(ci),
				     cap->issued | cap->implemented,
				     ci->i_flushing_caps, NULL);
		if (delayed) {
			spin_lock(&ci->i_ceph_lock);
			__cap_delay_requeue(mdsc, ci);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
}


/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_ceph_lock.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
	if (got & CEPH_CAP_PIN)
		ci->i_pin_ref++;
	if (got & CEPH_CAP_FILE_RD)
		ci->i_rd_ref++;
	if (got & CEPH_CAP_FILE_CACHE)
		ci->i_rdcache_ref++;
	if (got & CEPH_CAP_FILE_WR)
		ci->i_wr_ref++;
	if (got & CEPH_CAP_FILE_BUFFER) {
		/* first buffered-write ref pins the inode (dropped in
		 * ceph_put_cap_refs via iput) */
		if (ci->i_wb_ref == 0)
			ihold(&ci->vfs_inode);
		ci->i_wb_ref++;
		dout("__take_cap_refs %p wb %d -> %d (?)\n",
		     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
	}
}

/*
 * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that caller is responsible for ensuring max_size increases are
 * requested from the MDS.
 */
/*
 * One non-blocking attempt at taking cap refs; used as the condition of
 * wait_event_interruptible() in ceph_get_caps().
 *
 * Returns non-zero when the wait should end: either refs were taken
 * (*got set), an error occurred (*err set, e.g. -EBADF when the file
 * isn't open for the needed access), or a larger max_size must be
 * requested first (*check_max set).  Returns 0 to keep waiting.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    int *got, loff_t endoff, int *check_max, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	int ret = 0;
	int have, implemented;
	int file_wanted;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));
	spin_lock(&ci->i_ceph_lock);

	/* make sure file is actually open */
	file_wanted = __ceph_caps_file_wanted(ci);
	if ((file_wanted & need) == 0) {
		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
		     ceph_cap_string(need), ceph_cap_string(file_wanted));
		*err = -EBADF;
		ret = 1;
		goto out;
	}

	if (need & CEPH_CAP_FILE_WR) {
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_wanted_max_size) {
				/* caller must ask the MDS for a bigger
				 * max_size before we can write this far */
				*check_max = 1;
				ret = 1;
			}
			goto out;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out;
		}
	}
	have = __ceph_caps_issued(ci, &implemented);

	/*
	 * disallow writes while a truncate is pending
	 */
	if (ci->i_truncate_pending)
		have &= ~CEPH_CAP_FILE_WR;

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			*got = need | (have & want);
			__take_cap_refs(ci, *got);
			ret = 1;
		}
	} else {
		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out:
	spin_unlock(&ci->i_ceph_lock);
	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}

/*
 * Check the offset we are writing up to against our current
 * max_size.  If necessary, tell the MDS we want to write to
 * a larger offset.
 */
static void check_max_size(struct inode *inode, loff_t endoff)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int check = 0;

	/* do we need to explicitly request a larger max_size? */
	spin_lock(&ci->i_ceph_lock);
	if ((endoff >= ci->i_max_size ||
	     endoff > (inode->i_size << 1)) &&
	    endoff > ci->i_wanted_max_size) {
		dout("write %p at large endoff %llu, req max_size\n",
		     inode, endoff);
		ci->i_wanted_max_size = endoff;
		check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (check)
		/* ask the auth MDS to grow max_size */
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}

/*
 * Wait for caps, and take cap references.  If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
		  loff_t endoff)
{
	int check_max, ret, err;

retry:
	if (endoff > 0)
		check_max_size(&ci->vfs_inode, endoff);
	check_max = 0;
	err = 0;
	ret = wait_event_interruptible(ci->i_cap_wq,
				       try_get_cap_refs(ci, need, want,
							got, endoff,
							&check_max, &err));
	if (err)
		ret = err;
	if (check_max)
		/* max_size request was sent; retry the wait */
		goto retry;
	return ret;
}

/*
 * Take cap refs.  Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->i_ceph_lock);
	__take_cap_refs(ci, caps);
	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;
	struct ceph_cap_snap *capsnap;

	spin_lock(&ci->i_ceph_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wb_ref == 0) {
			last++;
			/* drop the inode ref taken by __take_cap_refs */
			put++;
		}
		dout("put_cap_refs %p wb %d -> %d (?)\n",
		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			/* a sync write just finished; complete any
			 * cap_snap that was waiting on it */
			if (!list_empty(&ci->i_cap_snaps)) {
				capsnap = list_first_entry(&ci->i_cap_snaps,
						     struct ceph_cap_snap,
						     ci_item);
				if (capsnap->writing) {
					capsnap->writing = 0;
					flushsnaps =
						__ceph_finish_cap_snap(ci,
								       capsnap);
					wake = 1;
				}
			}
		}
	spin_unlock(&ci->i_ceph_lock);

	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
	     last ? " last" : "", put ? " put" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
	if (put)
		iput(inode);
}

/*
 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
 * context.  Adjust per-snap dirty page accounting as appropriate.
 * Once all dirty data for a cap_snap is flushed, flush snapped file
 * metadata back to the MDS.  If we dropped the last ref, call
 * ceph_check_caps.
 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;
	int complete_capsnap = 0;
	int drop_capsnap = 0;
	int found = 0;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	ci->i_wrbuffer_ref -= nr;
	last = !ci->i_wrbuffer_ref;

	if (ci->i_head_snapc == snapc) {
		/* refs belong to the current (head) snap context */
		ci->i_wrbuffer_ref_head -= nr;
		if (ci->i_wrbuffer_ref_head == 0 &&
		    ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
			/* nothing left referencing the head context */
			BUG_ON(!ci->i_head_snapc);
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		/* refs belong to an older cap_snap; find it */
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
		capsnap->dirty_pages -= nr;
		if (capsnap->dirty_pages == 0) {
			complete_capsnap = 1;
			if (capsnap->dirty == 0)
				/* cap writeback completed before we created
				 * the cap_snap; no FLUSHSNAP is needed */
				drop_capsnap = 1;
		}
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     complete_capsnap ? " (complete capsnap)" : "",
		     drop_capsnap ? " (drop capsnap)" : "");
		if (drop_capsnap) {
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
		}
	}

	spin_unlock(&ci->i_ceph_lock);

	if (last) {
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		iput(inode);
	} else if (complete_capsnap) {
		ceph_flush_snaps(ci);
		wake_up_all(&ci->i_cap_wq);
	}
	if (drop_capsnap)
		iput(inode);
}

/*
 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
 * actually be a revocation if it specifies a smaller cap set.)
 *
 * caller holds s_mutex and i_ceph_lock, we drop both.
 *
 * return value:
 *   0 - ok
 *   1 - check_caps on auth cap only (writeback)
 *   2 - check_caps (ack revoke)
 */
static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
			     struct ceph_mds_session *session,
			     struct ceph_cap *cap,
			     struct ceph_buffer *xattr_buf)
		__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(grant->seq);
	int newcaps = le32_to_cpu(grant->caps);
	int issued, implemented, used, wanted, dirty;
	u64 size = le64_to_cpu(grant->size);
	u64 max_size = le64_to_cpu(grant->max_size);
	struct timespec mtime, atime, ctime;
	int check_caps = 0;
	int wake = 0;
	int writeback = 0;
	int revoked_rdcache = 0;
	int queue_invalidate = 0;

	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
	     inode, cap, mds, seq, ceph_cap_string(newcaps));
	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
		inode->i_size);

	/*
	 * If CACHE is being revoked, and we have no dirty buffers,
	 * try to invalidate (once).  (If there are dirty buffers, we
	 * will invalidate _after_ writeback.)
	 */
	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
	    (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
	    !ci->i_wrbuffer_ref) {
		if (try_nonblocking_invalidate(inode) == 0) {
			revoked_rdcache = 1;
		} else {
			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}
	}

	/* side effects now are allowed */

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	cap->cap_gen = session->s_cap_gen;

	__check_cap_issue(ci, cap, newcaps);

	/* only update attributes we don't hold the EXCL cap for;
	 * otherwise our local (possibly newer) copy wins */
	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(grant->mode);
		inode->i_uid = le32_to_cpu(grant->uid);
		inode->i_gid = le32_to_cpu(grant->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(grant->nlink));

	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
		int len = le32_to_cpu(grant->xattr_len);
		u64 version = le64_to_cpu(grant->xattr_version);

		if (version > ci->i_xattrs.version) {
			dout(" got new xattrs v%llu on %p len %d\n",
			     version, inode, len);
			if (ci->i_xattrs.blob)
				ceph_buffer_put(ci->i_xattrs.blob);
			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
			ci->i_xattrs.version = version;
		}
	}

	/* size/ctime/mtime/atime? */
	ceph_fill_file_size(inode, issued,
			    le32_to_cpu(grant->truncate_seq),
			    le64_to_cpu(grant->truncate_size), size);
	ceph_decode_timespec(&mtime, &grant->mtime);
	ceph_decode_timespec(&atime, &grant->atime);
	ceph_decode_timespec(&ctime, &grant->ctime);
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
			    &atime);

	/* max size increase? */
	if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
		ci->i_max_size = max_size;
		if (max_size >= ci->i_wanted_max_size) {
			ci->i_wanted_max_size = 0;  /* reset */
			ci->i_requested_max_size = 0;
		}
		wake = 1;
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		/* patch the message in place so the reply we build from it
		 * carries our current wanted set */
		grant->wanted = cpu_to_le32(wanted);
	}

	cap->seq = seq;

	/* file layout may have changed */
	ci->i_layout = grant->layout;

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		int revoking = cap->issued & ~newcaps;

		dout("revocation: %s -> %s (revoking %s)\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps),
		     ceph_cap_string(revoking));
		if (revoking & used & CEPH_CAP_FILE_BUFFER)
			writeback = 1;  /* initiate writeback; will delay ack */
		else if (revoking == CEPH_CAP_FILE_CACHE &&
			 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
			 queue_invalidate)
			; /* do nothing yet, invalidation will be queued */
		else if (cap == ci->i_auth_cap)
			check_caps = 1; /* check auth cap only */
		else
			check_caps = 2; /* check all caps */
		cap->issued = newcaps;
		cap->implemented |= newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		cap->issued = newcaps;
		cap->implemented |= newcaps; /* add bits only, to
					      * avoid stepping on a
					      * pending revocation */
		wake = 1;
	}
	BUG_ON(cap->issued & ~cap->implemented);

	spin_unlock(&ci->i_ceph_lock);

	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* note: check_caps paths drop s_mutex via ceph_check_caps;
	 * otherwise we must drop it ourselves here */
	if (check_caps == 1)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
				session);
	else if (check_caps == 2)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
	else
		mutex_unlock(&session->s_mutex);
}

/*
 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
 * MDS has been safely committed.
 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;
	int drop = 0;
	int i;

	/* only clear bits whose flush tid matches this ack */
	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((dirty & (1 << i)) &&
		    flush_tid == ci->i_cap_flush_tid[i])
			cleaned |= 1 << i;

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	spin_lock(&mdsc->cap_dirty_lock);
	if (ci->i_flushing_caps == 0) {
		list_del_init(&ci->i_flushing_item);
		if (!list_empty(&session->s_cap_flushing))
			dout(" mds%d still flushing cap on %p\n",
			     session->s_mds,
			     &list_entry(session->s_cap_flushing.next,
					 struct ceph_inode_info,
					 i_flushing_item)->vfs_inode);
		mdsc->num_cap_flushing--;
		wake_up_all(&mdsc->cap_flushing_wq);
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = 1;
			if (ci->i_wrbuffer_ref_head == 0) {
				BUG_ON(!ci->i_head_snapc);
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	wake_up_all(&ci->i_cap_wq);

out:
	spin_unlock(&ci->i_ceph_lock);
	if (drop)
		iput(inode);
}

/*
 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
 * throw away our cap_snap.
 *
 * Caller hold s_mutex.
 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	int drop = 0;

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			if (capsnap->flush_tid != flush_tid) {
				/* stale ack for an older flush attempt */
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->flush_tid);
				break;
			}
			WARN_ON(capsnap->dirty_pages || capsnap->writing);
			dout(" removing %p cap_snap %p follows %lld\n",
			     inode, capsnap, follows);
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
			drop = 1;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	if (drop)
		iput(inode);
}

/*
 * Handle TRUNC from MDS, indicating file truncation.
 *
 * caller hold s_mutex.
 */
static void handle_cap_trunc(struct inode *inode,
			     struct ceph_mds_caps *trunc,
			     struct ceph_mds_session *session)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(trunc->seq);
	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
	u64 size = le64_to_cpu(trunc->size);
	int implemented = 0;
	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
	int queue_trunc = 0;

	issued |= implemented | dirty;

	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
	     inode, mds, seq, truncate_size, truncate_seq);
	/* returns nonzero if a local vmtruncate is needed */
	queue_trunc = ceph_fill_file_size(inode, issued,
					  truncate_seq, truncate_size, size);
	spin_unlock(&ci->i_ceph_lock);

	if (queue_trunc)
		/* can't truncate pagecache from message-handler context;
		 * defer to the workqueue */
		ceph_queue_vmtruncate(inode);
}

/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_session *session,
			      int *open_target_sessions)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	struct ceph_cap *cap = NULL, *t;
	struct rb_node *p;
	int remember = 1;

	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
	     inode, ci, mds, mseq);

	spin_lock(&ci->i_ceph_lock);

	/* make sure we haven't seen a higher mseq */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		t = rb_entry(p, struct ceph_cap, ci_node);
		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
			dout(" higher mseq on cap from mds%d\n",
			     t->session->s_mds);
			remember = 0;
		}
		if (t->session->s_mds == mds)
			cap = t;
	}

	if (cap) {
		if (remember) {
			/* make note */
			ci->i_cap_exporting_mds = mds;
			ci->i_cap_exporting_mseq = mseq;
			ci->i_cap_exporting_issued = cap->issued;

			/*
			 * make sure we have open sessions with all possible
			 * export targets, so that we get the matching IMPORT
			 */
			*open_target_sessions = 1;

			/*
			 * we can't flush dirty caps that we've seen the
			 * EXPORT but no IMPORT for
			 */
			spin_lock(&mdsc->cap_dirty_lock);
			if (!list_empty(&ci->i_dirty_item)) {
				dout(" moving %p to cap_dirty_migrating\n",
				     inode);
				list_move(&ci->i_dirty_item,
					  &mdsc->cap_dirty_migrating);
			}
			spin_unlock(&mdsc->cap_dirty_lock);
		}
		__ceph_remove_cap(cap);
	}
	/* else, we already released it */

	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
 * clean them up.
 *
 * caller holds s_mutex.
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_session *session,
			      void *snaptrace, int snaptrace_len)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned issued = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);

	if (ci->i_cap_exporting_mds >= 0 &&
	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
		/* this IMPORT completes the EXPORT we noted earlier;
		 * clear the temporary exporting state */
		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
		     " - cleared exporting from mds%d\n",
		     inode, ci, mds, mseq,
		     ci->i_cap_exporting_mds);
		ci->i_cap_exporting_issued = 0;
		ci->i_cap_exporting_mseq = 0;
		ci->i_cap_exporting_mds = -1;

		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&ci->i_dirty_item)) {
			dout(" moving %p back to cap_dirty\n", inode);
			list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
		}
		spin_unlock(&mdsc->cap_dirty_lock);
	} else {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
		     inode, ci, mds, mseq);
	}

	/* snap trace update requires the write side; downgrade so we
	 * hold the read side across add_cap/kick below */
	down_write(&mdsc->snap_rwsem);
	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
			       false);
	downgrade_write(&mdsc->snap_rwsem);
	ceph_add_cap(inode, session, cap_id, -1,
		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
		     NULL /* no caps context */);
	kick_flushing_inode_caps(mdsc, session, inode);
	up_read(&mdsc->snap_rwsem);

	/* make sure we re-request max_size, if necessary */
	spin_lock(&ci->i_ceph_lock);
	ci->i_wanted_max_size = 0;  /* reset */
	ci->i_requested_max_size = 0;
	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
/*
 * Top-level dispatcher for CEPH_MSG_CLIENT_CAPS messages.  Decodes the
 * header, looks up the inode, and routes to the per-op handlers above.
 * Takes and releases session->s_mutex; the per-op grant handler drops
 * locks itself (see notes at the second switch).
 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	int mds = session->s_mds;
	int op;
	u32 seq, mseq;
	struct ceph_vino vino;
	u64 cap_id;
	u64 size, max_size;
	u64 tid;
	void *snaptrace;
	size_t snaptrace_len;
	void *flock;
	u32 flock_len;
	int open_target_sessions = 0;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	cap_id = le64_to_cpu(h->cap_id);
	seq = le32_to_cpu(h->seq);
	mseq = le32_to_cpu(h->migrate_seq);
	size = le64_to_cpu(h->size);
	max_size = le64_to_cpu(h->max_size);

	/* the snap trace immediately follows the fixed header */
	snaptrace = h + 1;
	snaptrace_len = le32_to_cpu(h->snap_trace_len);

	if (le16_to_cpu(msg->hdr.version) >= 2) {
		void *p, *end;

		/* v2+ messages carry file-locking state after the trace */
		p = snaptrace + snaptrace_len;
		end = msg->front.iov_base + msg->front.iov_len;
		ceph_decode_32_safe(&p, end, flock_len, bad);
		flock = p;
	} else {
		flock = NULL;
		flock_len = 0;
	}

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	/* NOTE(review): ci is computed before the NULL check; it is only a
	 * container_of offset and is not dereferenced unless inode != NULL */
	ci = ceph_inode(inode);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);
	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);

		if (op == CEPH_CAP_OP_IMPORT)
			__queue_cap_release(session, vino.ino, cap_id,
					    mseq, seq);
		goto flush_cap_releases;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;

	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, session, &open_target_sessions);
		goto done;

	case CEPH_CAP_OP_IMPORT:
		handle_cap_import(mdsc, inode, h, session, snaptrace,
				  snaptrace_len);
		/* deliberate fall-through: IMPORT is also processed as a
		 * grant in the second switch below */
	}

	/* the rest require a cap */
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout(" no cap on %p ino %llx.%llx from mds%d\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&ci->i_ceph_lock);
		goto flush_cap_releases;
	}

	/* note that each of these drops i_ceph_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
	case CEPH_CAP_OP_IMPORT:
		handle_cap_grant(inode, h, session, cap, msg->middle);
		/* NOTE(review): goes to done_unlocked, skipping the
		 * mutex_unlock at done — presumably handle_cap_grant drops
		 * s_mutex itself; confirm against handle_cap_grant */
		goto done_unlocked;

	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;

	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;

	default:
		spin_unlock(&ci->i_ceph_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

	goto done;

flush_cap_releases:
	/*
	 * send any full release message to try to move things
	 * along for the mds (who clearly thinks we still have this
	 * cap).
	 */
	ceph_add_cap_releases(mdsc, session);
	ceph_send_cap_releases(mdsc, session);

done:
	mutex_unlock(&session->s_mutex);
done_unlocked:
	if (inode)
		iput(inode);
	if (open_target_sessions)
		ceph_mdsc_open_export_target_sessions(mdsc, session);
	return;

bad:
	pr_err("ceph_handle_caps: corrupt message\n");
	ceph_msg_dump(msg);
	return;
}

/*
 * Delayed work handler to process end of delayed cap release LRU list.
 */
/*
 * Walk the head of the delayed-cap LRU and check caps on each inode
 * whose hold time has expired (or which is flagged CEPH_I_FLUSH).
 * cap_delay_lock is dropped around each ceph_check_caps call.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;	/* exits with lock held; unlocked below */
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;	/* list is in hold-time order; stop early */
		list_del_init(&ci->i_cap_delay_list);
		spin_unlock(&mdsc->cap_delay_lock);
		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
		ceph_check_caps(ci, flags, NULL);
	}
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
				      i_dirty_item);
		inode = &ci->vfs_inode;
		/* hold an inode ref across the unlocked check_caps call */
		ihold(inode);
		dout("flush_dirty_caps %p\n", inode);
		spin_unlock(&mdsc->cap_dirty_lock);
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
		iput(inode);
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	dout("flush_dirty_caps done\n");
}

/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
	if (--ci->i_nr_by_mode[fmode] == 0)
		last++;
	spin_unlock(&ci->i_ceph_lock);

	/* snapped inodes hold no releasable caps */
	if (last && ci->i_vino.snap == CEPH_NOSNAP)
		ceph_check_caps(ci, 0, NULL);
}

/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * drop.
 */
/*
 * Encode a cap release record for @inode into *p (advancing *p past the
 * record).  Returns 1 if a record was written, 0 otherwise.  Only caps
 * that are unused, clean, covered by @drop and not protected by @unless
 * are actually dropped; @force writes a record even with nothing to drop.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
			      int mds, int drop, int unless, int force)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	struct ceph_mds_request_release *rel = *p;
	int used, dirty;
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
	     inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
	     ceph_cap_string(unless));

	/* only drop unused, clean caps */
	drop &= ~(used | dirty);

	cap = __get_cap_for_mds(ci, mds);
	if (cap && __cap_is_valid(cap)) {
		if (force ||
		    ((cap->issued & drop) &&
		     (cap->issued & unless) == 0)) {
			if ((cap->issued & drop) &&
			    (cap->issued & unless) == 0) {
				dout("encode_inode_release %p cap %p %s -> "
				     "%s\n", inode, cap,
				     ceph_cap_string(cap->issued),
				     ceph_cap_string(cap->issued & ~drop));
				cap->issued &= ~drop;
				cap->implemented &= ~drop;
				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
					int wanted = __ceph_caps_wanted(ci);
					dout(" wanted %s -> %s (act %s)\n",
					     ceph_cap_string(cap->mds_wanted),
					     ceph_cap_string(cap->mds_wanted &
							     ~wanted),
					     ceph_cap_string(wanted));
					cap->mds_wanted &= wanted;
				}
			} else {
				/* @force with nothing droppable: record only */
				dout("encode_inode_release %p cap %p %s"
				     " (force)\n", inode, cap,
				     ceph_cap_string(cap->issued));
			}

			rel->ino = cpu_to_le64(ceph_ino(inode));
			rel->cap_id = cpu_to_le64(cap->cap_id);
			rel->seq = cpu_to_le32(cap->seq);
			/* NOTE(review): trailing comma is the comma operator,
			 * not a typo'd list — behaves exactly like ';' here */
			rel->issue_seq = cpu_to_le32(cap->issue_seq),
			rel->mseq = cpu_to_le32(cap->mseq);
			rel->caps = cpu_to_le32(cap->issued);
			rel->wanted = cpu_to_le32(cap->mds_wanted);
			rel->dname_len = 0;
			rel->dname_seq = 0;
			*p += sizeof(*rel);
			ret = 1;
		} else {
			dout("encode_inode_release %p cap %p %s\n",
			     inode, cap, ceph_cap_string(cap->issued));
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Encode a release for the dentry's parent-directory caps, plus the
 * dentry name and lease seq when we hold a lease from @mds.  Returns
 * the result of ceph_encode_inode_release on the directory.
 */
int ceph_encode_dentry_release(void **p, struct dentry
			       *dentry, int mds, int drop, int unless)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force an record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_ceph_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * release.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
		__ceph_mdsc_drop_dentry_lease(dentry);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}
gpl-2.0
AudioGod/DTS-Sound-Integration_CAF-Android-kernel
arch/x86/crypto/aesni-intel_glue.c
1526
38611
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
/* Per-tfm context for the rfc4106(gcm(aes)) AEAD. */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];		/* GHASH subkey H */
	struct crypto_aes_ctx aes_key_expanded;	/* expanded AES key schedule */
	u8 nonce[4];			/* RFC4106 salt (last 4 key bytes) */
	struct cryptd_aead *cryptd_tfm;	/* async fallback when FPU unusable */
};

/* Completion carrier for the async hash-subkey derivation below. */
struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

/* raw_* buffers are oversized so the real ctx can be 16-byte aligned */
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

/* Entry points implemented in the AES-NI assembly file. */
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* NOTE(review): presumably defined in the companion fpu glue module */
int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 * to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 * Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* Locate the 16-byte-aligned gcm context within the tfm context area. */
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
			  AESNI_ALIGN);
}
#endif

/* Align a raw context pointer, unless the API already guarantees it. */
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

/*
 * Expand the AES key into @raw_ctx.  Uses the AES-NI instruction when
 * the FPU is usable in this context, the generic C expansion otherwise.
 */
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

/* Single-block encrypt with x86 C fallback when the FPU is unusable. */
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

/* Single-block decrypt counterpart of aes_encrypt. */
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

/* __-prefixed variants assume the caller already owns the FPU. */
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx =
		aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

/*
 * ECB encryption: walk the scatterlists and process whole blocks with
 * the AES-NI ECB routine inside one kernel_fpu_begin/end section.
 */
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	/* no sleeping while the FPU section is open */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

/* ECB decryption; mirror of ecb_encrypt. */
static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

/* CBC encryption; walk.iv carries the chaining value between segments. */
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

/* CBC decryption; mirror of cbc_encrypt. */
static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
/* Encrypt the final partial CTR block by XOR against one keystream block. */
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

/* CTR mode: full blocks via aesni_ctr_enc, trailing bytes via the final
 * helper above. */
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

/* ablk_* inits bind the async wrappers to the __driver implementations. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct
		crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

/* ECB bulk callbacks used by both the LRW and 32-bit XTS templates. */
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}

/*
 * LRW setkey: the last AES_BLOCK_SIZE bytes of @key feed the LRW tweak
 * table; the rest is the AES key proper.
 */
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

/* LRW encryption via the generic lrw_crypt engine; FPU held throughout. */
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

/* LRW decryption; mirror of lrw_encrypt. */
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

/*
 * XTS setkey: key is two equal-size AES keys back to back — first half
 * for data, second half for the tweak.
 */
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64
/* Single-block XTS helpers bridging to the glue_helper framework. */
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

/* 8-block wide XTS paths using the dedicated asm routine. */
static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

/* Dispatch tables: try 8 blocks at a time, fall back to single blocks. */
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct
		blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else
/* 32-bit fallback: generic xts_crypt with the ECB bulk callbacks. */

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
/*
 * rfc4106 init: allocate the cryptd fallback AEAD and mirror our
 * context into its child so both paths share the same key material.
 */
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

/* Async completion callback for the hash-subkey CTR encryption below. */
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

/*
 * Derive the GHASH subkey H = AES_K(0^128) by running ctr(aes) over a
 * zero block, waiting for async completion if necessary.
 */
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* operation queued; wait for the callback to fire */
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

/*
 * rfc4106 setkey: the trailing 4 bytes of the supplied key are the
 * nonce (salt); the remainder must be a 128-bit AES key.  The expanded
 * state is copied into the cryptd child so both sync and async paths
 * agree.
 */
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
						   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxuliar aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	/* NOTE(review): ret from key expansion is overwritten here without
	 * being checked first — confirm intended */
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag length and can
 * be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

/*
 * Encrypt entry point: run the AES-NI path directly when the FPU is
 * usable, otherwise defer to the cryptd async fallback.
 */
static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

/* Decrypt entry point; same FPU/cryptd dispatch as rfc4106_encrypt. */
static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return
			crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

/*
 * Core encrypt path invoked with the FPU already owned.  Builds the
 * j0 counter block (salt || IV || 0x00000001), then either maps the
 * single-entry scatterlists directly or linearizes src/assoc into a
 * temporary buffer before calling the asm GCM routine.
 */
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 8 or 12 bytes */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		/* fast path: everything already virtually contiguous */
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

/*
 * Core decrypt path (continues beyond this chunk): validates lengths,
 * builds j0 as above, then maps or linearizes the buffers before
 * calling aesni_gcm_dec and comparing the authentication tag.
 */
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 8 or 12 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
/* Allocate memory for src, dst, assoc */ src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); if (!src) return -ENOMEM; assoc = (src + req->cryptlen + auth_tag_len); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); dst = src; } aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv, ctx->hash_subkey, assoc, (unsigned long)req->assoclen, authTag, auth_tag_len); /* Compare generated tag with passed in tag. */ retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ? -EBADMSG : 0; if (one_entry_in_sg) { if (unlikely(req->src != req->dst)) { scatterwalk_unmap(dst); scatterwalk_done(&dst_sg_walk, 0, 0); } scatterwalk_unmap(src); scatterwalk_unmap(assoc); scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); kfree(src); } return retval; } #endif static struct crypto_alg aesni_algs[] = { { .cra_name = "aes", .cra_driver_name = "aes-aesni", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } } }, { .cra_name = "__aes-aesni", .cra_driver_name = "__driver-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = __aes_encrypt, .cia_decrypt = __aes_decrypt } } }, { .cra_name = "__ecb-aes-aesni", .cra_driver_name = "__driver-ecb-aes-aesni", .cra_priority = 0, .cra_flags = 
CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aes_set_key, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "__cbc-aes-aesni", .cra_driver_name = "__driver-cbc-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aes_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_ecb_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_cbc_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, #ifdef CONFIG_X86_64 }, { .cra_name = 
"__ctr-aes-aesni", .cra_driver_name = "__driver-ctr-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, }, { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_ctr_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_encrypt, .geniv = "chainiv", }, }, }, { .cra_name = "__gcm-aes-aesni", .cra_driver_name = "__driver-gcm-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_AEAD, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN, .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_u = { .aead = { .encrypt = __driver_rfc4106_encrypt, .decrypt = __driver_rfc4106_decrypt, }, }, }, { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN, .cra_alignmask = 0, .cra_type = &crypto_nivaead_type, .cra_module = THIS_MODULE, .cra_init = rfc4106_init, .cra_exit = rfc4106_exit, .cra_u = { .aead = { .setkey = rfc4106_set_key, .setauthsize = rfc4106_set_authsize, .encrypt = rfc4106_encrypt, .decrypt = rfc4106_decrypt, .geniv = "seqiv", .ivsize = 8, 
.maxauthsize = 16, }, }, #endif #ifdef HAS_PCBC }, { .cra_name = "pcbc(aes)", .cra_driver_name = "pcbc-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_pcbc_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, #endif }, { .cra_name = "__lrw-aes-aesni", .cra_driver_name = "__driver-lrw-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aesni_lrw_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_exit = lrw_aesni_exit_tfm, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = lrw_aesni_setkey, .encrypt = lrw_encrypt, .decrypt = lrw_decrypt, }, }, }, { .cra_name = "__xts-aes-aesni", .cra_driver_name = "__driver-xts-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aesni_xts_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = xts_aesni_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, }, }, }, { .cra_name = "lrw(aes)", .cra_driver_name = "lrw-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = 
THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, } }; static const struct x86_cpu_id aesni_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_AES), {} }; MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); static int __init aesni_init(void) { int err; if (!x86_match_cpu(aesni_cpu_id)) return -ENODEV; err = crypto_fpu_init(); if (err) return err; return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); } static void __exit aesni_exit(void) { crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); crypto_fpu_exit(); } module_init(aesni_init); module_exit(aesni_exit); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); MODULE_LICENSE("GPL"); MODULE_ALIAS("aes");
gpl-2.0
DirtyUnicorns/android_kernel_samsung_hlte
drivers/sensorhub/stm_patek/factory/gesture_max88922.c
1782
4638
/*
 * Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include "../ssp.h"

#define VENDOR		"MAXIM"
#define CHIP_ID		"MAX88922"

/* sysfs show: vendor string of the gesture sensor chip */
static ssize_t gesture_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", VENDOR);
}

/* sysfs show: part name of the gesture sensor chip */
static ssize_t gesture_name_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", CHIP_ID);
}

/* sysfs show: last raw gesture samples cached by the sensor hub */
static ssize_t raw_data_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ssp_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d\n",
		data->buf[GESTURE_SENSOR].data[1],
		data->buf[GESTURE_SENSOR].data[2],
		data->buf[GESTURE_SENSOR].data[3],
		data->buf[GESTURE_SENSOR].data[9]);
}

/*
 * sysfs show: run the factory self test on the sensor hub and report the
 * four channel readings (A..D), each rebased by -1023.
 * On allocation failure or hub timeout the zero-initialised values are
 * reported.  FIX: the kzalloc() result was previously dereferenced
 * without a NULL check.
 */
static ssize_t gesture_get_selftest_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	s16 raw_A = 0, raw_B = 0, raw_C = 0, raw_D = 0;
	int iRet = 0;
	char chTempBuf[8] = { 0, };
	struct ssp_data *data = dev_get_drvdata(dev);
	struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);

	if (msg == NULL) {
		pr_err("[SSP]: %s - failed to allocate memory\n", __func__);
		goto exit;
	}

	msg->cmd = GESTURE_FACTORY;
	msg->length = 8;
	msg->options = AP2HUB_READ;
	msg->buffer = chTempBuf;
	msg->free_buffer = 0;

	/* msg ownership passes to ssp_spi_sync() */
	iRet = ssp_spi_sync(data, msg, 2000);
	if (iRet != SUCCESS) {
		pr_err("[SSP]: %s - Gesture Selftest Timeout!!\n", __func__);
		goto exit;
	}

	pr_info("%x %x %x %x %x %x %x %x \n", chTempBuf[0], chTempBuf[1],
		chTempBuf[2], chTempBuf[3], chTempBuf[4], chTempBuf[5],
		chTempBuf[6], chTempBuf[7]);

	/* Each channel arrives big-endian as two bytes, offset by 1023. */
	raw_A = ((((s16)chTempBuf[0]) << 8) + ((s16)chTempBuf[1])) - 1023;
	raw_B = ((((s16)chTempBuf[2]) << 8) + ((s16)chTempBuf[3])) - 1023;
	raw_C = ((((s16)chTempBuf[4]) << 8) + ((s16)chTempBuf[5])) - 1023;
	raw_D = ((((s16)chTempBuf[6]) << 8) + ((s16)chTempBuf[7])) - 1023;

	pr_info("[SSP] %s: self test A = %d, B = %d, C = %d, D = %d\n",
		__func__, raw_A, raw_B, raw_C, raw_D);
exit:
	return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d\n",
		raw_A, raw_B, raw_C, raw_D);
}

/* sysfs show: currently configured IR LED current setting */
static ssize_t ir_current_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ssp_data *data = dev_get_drvdata(dev);

	ssp_dbg("[SSP]: %s - Ir_Current Setting = %d\n",
		__func__, data->uIr_Current);

	return snprintf(buf, PAGE_SIZE, "%d\n", data->uIr_Current);
}

/*
 * sysfs store: set the IR LED current.  The written value (in the
 * percent-style scale of set_current[0]) is translated to the register
 * encoding in set_current[1], pushed to the hub, and the human-readable
 * value is kept in data->uIr_Current for ir_current_show().
 */
static ssize_t ir_current_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	u16 uNewIrCurrent = DEFUALT_IR_CURRENT;
	int iRet = 0;
	u16 current_index = 0;
	struct ssp_data *data = dev_get_drvdata(dev);
	/* row 0: accepted user values, row 1: matching register encodings */
	static u16 set_current[2][16] = {
		{0, 6, 13, 20, 26, 33, 40, 46, 53, 60,
			66, 73, 80, 86, 93, 100},
		{0<<4, 1<<4, 2<<4, 3<<4, 4<<4, 5<<4, 6<<4, 7<<4, 8<<4, 9<<4,
			10<<4, 11<<4, 12<<4, 13<<4, 14<<4, 15<<4} };

	iRet = kstrtou16(buf, 10, &uNewIrCurrent);
	if (iRet < 0)
		pr_err("[SSP]: %s - kstrtoint failed.(%d)\n", __func__, iRet);
	else {
		for (current_index = 0; current_index < 16; current_index++) {
			if (set_current[0][current_index] == uNewIrCurrent) {
				data->uIr_Current =
					set_current[1][current_index];
				break;
			}
		}
		if (current_index == 16) /* current setting value wrong. */
			return ERROR;

		set_gesture_current(data, data->uIr_Current);
		/* store the user-facing value for subsequent reads */
		data->uIr_Current = uNewIrCurrent;
	}

	ssp_dbg("[SSP]: %s - new Ir_Current Setting : %d\n",
		__func__, data->uIr_Current);

	return size;
}

static DEVICE_ATTR(vendor, S_IRUGO, gesture_vendor_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, gesture_name_show, NULL);
static DEVICE_ATTR(raw_data, S_IRUGO, raw_data_read, NULL);
static DEVICE_ATTR(selftest, S_IRUGO, gesture_get_selftest_show, NULL);
static DEVICE_ATTR(ir_current, S_IRUGO | S_IWUSR | S_IWGRP,
	ir_current_show, ir_current_store);

static struct device_attribute *gesture_attrs[] = {
	&dev_attr_vendor,
	&dev_attr_name,
	&dev_attr_raw_data,
	&dev_attr_selftest,
	&dev_attr_ir_current,
	NULL,
};

/* Register the factory-test sysfs node for the gesture sensor. */
void initialize_gesture_factorytest(struct ssp_data *data)
{
	sensors_register(data->ges_device, data, gesture_attrs,
		"gesture_sensor");
}

/* Remove the factory-test sysfs node. */
void remove_gesture_factorytest(struct ssp_data *data)
{
	sensors_unregister(data->ges_device, gesture_attrs);
}
gpl-2.0
yjwong/android_kernel_samsung_galaxys2
drivers/cpufreq/longhaul.c
3062
26507
/* * (C) 2001-2004 Dave Jones. <davej@redhat.com> * (C) 2002 Padraig Brady. <padraig@antefacto.com> * * Licensed under the terms of the GNU GPL License version 2. * Based upon datasheets & sample CPUs kindly provided by VIA. * * VIA have currently 3 different versions of Longhaul. * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. * Version 2 of longhaul is backward compatible with v1, but adds * LONGHAUL MSR for purpose of both frequency and voltage scaling. * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C). * Version 3 of longhaul got renamed to Powersaver and redesigned * to use only the POWERSAVER MSR at 0x110a. * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. * It's pretty much the same feature wise to longhaul v2, though * there is provision for scaling FSB too, but this doesn't work * too well in practice so we don't even try to use this. * * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/timex.h> #include <linux/io.h> #include <linux/acpi.h> #include <asm/msr.h> #include <acpi/processor.h> #include "longhaul.h" #define PFX "longhaul: " #define TYPE_LONGHAUL_V1 1 #define TYPE_LONGHAUL_V2 2 #define TYPE_POWERSAVER 3 #define CPU_SAMUEL 1 #define CPU_SAMUEL2 2 #define CPU_EZRA 3 #define CPU_EZRA_T 4 #define CPU_NEHEMIAH 5 #define CPU_NEHEMIAH_C 6 /* Flags */ #define USE_ACPI_C3 (1 << 1) #define USE_NORTHBRIDGE (1 << 2) static int cpu_model; static unsigned int numscales = 16; static unsigned int fsb; static const struct mV_pos *vrm_mV_table; static const unsigned char *mV_vrm_table; static unsigned int highest_speed, lowest_speed; /* kHz */ static unsigned int minmult, maxmult; static int can_scale_voltage; 
static struct acpi_processor *pr; static struct acpi_processor_cx *cx; static u32 acpi_regs_addr; static u8 longhaul_flags; static unsigned int longhaul_index; /* Module parameters */ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; /* Clock ratios multiplied by 10 */ static int mults[32]; static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; static char speedbuffer[8]; static char *print_speed(int speed) { if (speed < 1000) { snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed); return speedbuffer; } if (speed%1000 == 0) snprintf(speedbuffer, sizeof(speedbuffer), "%dGHz", speed/1000); else snprintf(speedbuffer, sizeof(speedbuffer), "%d.%dGHz", speed/1000, (speed%1000)/100); return speedbuffer; } static unsigned int calc_speed(int mult) { int khz; khz = (mult/10)*fsb; if (mult%10) khz += fsb/2; khz *= 1000; return khz; } static int longhaul_get_cpu_mult(void) { unsigned long invalue = 0, lo, hi; rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi); invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22; if (longhaul_version == TYPE_LONGHAUL_V2 || longhaul_version == TYPE_POWERSAVER) { if (lo & (1<<27)) invalue += 16; } return eblcr[invalue]; } /* For processor with BCR2 MSR */ static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; rdmsrl(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; bcr2.bits.CLOCKMUL = mults_index & 0xff; /* Sync to timer tick */ safe_halt(); /* Change frequency on next halt or sleep */ wrmsrl(MSR_VIA_BCR2, bcr2.val); /* Invoke transition */ ACPI_FLUSH_CPU_CACHE(); halt(); /* Disable software clock multiplier */ local_irq_disable(); rdmsrl(MSR_VIA_BCR2, bcr2.val); bcr2.bits.ESOFTBF = 0; wrmsrl(MSR_VIA_BCR2, bcr2.val); } /* For processor with Longhaul MSR */ static void do_powersaver(int cx_address, unsigned int mults_index, unsigned int dir) { union msr_longhaul longhaul; u32 t; rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* 
Setup new frequency */ if (!revid_errata) longhaul.bits.RevisionKey = longhaul.bits.RevisionID; else longhaul.bits.RevisionKey = 0; longhaul.bits.SoftBusRatio = mults_index & 0xf; longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4; /* Setup new voltage */ if (can_scale_voltage) longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f; /* Sync to timer tick */ safe_halt(); /* Raise voltage if necessary */ if (can_scale_voltage && dir) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 * read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); } /* Change frequency on next halt or sleep */ longhaul.bits.EnableSoftBusRatio = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } /* Disable bus ratio bit */ longhaul.bits.EnableSoftBusRatio = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce voltage if necessary */ if (can_scale_voltage && !dir) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 * read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); } } /** * longhaul_set_cpu_frequency() * @mults_index : bitpattern of the new multiplier. * * Sets a new clock ratio. 
*/ static void longhaul_setstate(unsigned int table_index) { unsigned int mults_index; int speed, mult; struct cpufreq_freqs freqs; unsigned long flags; unsigned int pic1_mask, pic2_mask; u16 bm_status = 0; u32 bm_timeout = 1000; unsigned int dir = 0; mults_index = longhaul_table[table_index].index; /* Safety precautions */ mult = mults[mults_index & 0x1f]; if (mult == -1) return; speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) return; /* Voltage transition before frequency transition? */ if (can_scale_voltage && longhaul_index < table_index) dir = 1; freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; freqs.cpu = 0; /* longhaul.c is UP only driver */ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); local_irq_save(flags); pic2_mask = inb(0xA1); pic1_mask = inb(0x21); /* works on C3. save mask. */ outb(0xFF, 0xA1); /* Overkill */ outb(0xFE, 0x21); /* TMR0 only */ /* Wait while PCI bus is busy. */ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE || ((pr != NULL) && pr->flags.bm_control))) { bm_status = inw(acpi_regs_addr); bm_status &= 1 << 4; while (bm_status && bm_timeout) { outw(1 << 4, acpi_regs_addr); bm_timeout--; bm_status = inw(acpi_regs_addr); bm_status &= 1 << 4; } } if (longhaul_flags & USE_NORTHBRIDGE) { /* Disable AGP and PCI arbiters */ outb(3, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { /* Disable bus master arbitration */ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); } switch (longhaul_version) { /* * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) * Software controlled multipliers only. */ case TYPE_LONGHAUL_V1: do_longhaul1(mults_index); break; /* * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C] * * Longhaul v3 (aka Powersaver). 
(Ezra-T [C5M] & Nehemiah [C5N]) * Nehemiah can do FSB scaling too, but this has never been proven * to work in practice. */ case TYPE_LONGHAUL_V2: case TYPE_POWERSAVER: if (longhaul_flags & USE_ACPI_C3) { /* Don't allow wakeup */ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0); do_powersaver(cx->address, mults_index, dir); } else { do_powersaver(0, mults_index, dir); } break; } if (longhaul_flags & USE_NORTHBRIDGE) { /* Enable arbiters */ outb(0, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { /* Enable bus master arbitration */ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); } outb(pic2_mask, 0xA1); /* restore mask */ outb(pic1_mask, 0x21); local_irq_restore(flags); preempt_enable(); freqs.new = calc_speed(longhaul_get_cpu_mult()); /* Check if requested frequency is set. */ if (unlikely(freqs.new != speed)) { printk(KERN_INFO PFX "Failed to set requested frequency!\n"); /* Revision ID = 1 but processor is expecting revision key * equal to 0. Jumpers at the bottom of processor will change * multiplier and FSB, but will not change bits in Longhaul * MSR nor enable voltage scaling. */ if (!revid_errata) { printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" " "option.\n"); revid_errata = 1; msleep(200); goto retry_loop; } /* Why ACPI C3 sometimes doesn't work is a mystery for me. * But it does happen. Processor is entering ACPI C3 state, * but it doesn't change frequency. I tried poking various * bits in northbridge registers, but without success. */ if (longhaul_flags & USE_ACPI_C3) { printk(KERN_INFO PFX "Disabling ACPI C3 support.\n"); longhaul_flags &= ~USE_ACPI_C3; if (revid_errata) { printk(KERN_INFO PFX "Disabling \"Ignore " "Revision ID\" option.\n"); revid_errata = 0; } msleep(200); goto retry_loop; } /* This shouldn't happen. Longhaul ver. 2 was reported not * working on processors without voltage scaling, but with * RevID = 1. RevID errata will make things right. Just * to be 100% sure. 
*/ if (longhaul_version == TYPE_LONGHAUL_V2) { printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n"); longhaul_version = TYPE_LONGHAUL_V1; msleep(200); goto retry_loop; } } /* Report true CPU frequency */ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); if (!bm_timeout) printk(KERN_INFO PFX "Warning: Timeout while waiting for " "idle PCI bus.\n"); } /* * Centaur decided to make life a little more tricky. * Only longhaul v1 is allowed to read EBLCR BSEL[0:1]. * Samuel2 and above have to try and guess what the FSB is. * We do this by assuming we booted at maximum multiplier, and interpolate * between that value multiplied by possible FSBs and cpu_mhz which * was calculated at boot time. Really ugly, but no other way to do this. */ #define ROUNDING 0xf static int guess_fsb(int mult) { int speed = cpu_khz / 1000; int i; int speeds[] = { 666, 1000, 1333, 2000 }; int f_max, f_min; for (i = 0; i < 4; i++) { f_max = ((speeds[i] * mult) + 50) / 100; f_max += (ROUNDING / 2); f_min = f_max - ROUNDING; if ((speed <= f_max) && (speed >= f_min)) return speeds[i] / 10; } return 0; } static int __cpuinit longhaul_get_ranges(void) { unsigned int i, j, k = 0; unsigned int ratio; int mult; /* Get current frequency */ mult = longhaul_get_cpu_mult(); if (mult == -1) { printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); return -EINVAL; } fsb = guess_fsb(mult); if (fsb == 0) { printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); return -EINVAL; } /* Get max multiplier - as we always did. * Longhaul MSR is useful only when voltage scaling is enabled. * C3 is booting at max anyway. 
*/ maxmult = mult; /* Get min multiplier */ switch (cpu_model) { case CPU_NEHEMIAH: minmult = 50; break; case CPU_NEHEMIAH_C: minmult = 40; break; default: minmult = 30; break; } pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); if (lowest_speed == highest_speed) { printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n"); return -EINVAL; } if (lowest_speed > highest_speed) { printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", lowest_speed, highest_speed); return -EINVAL; } longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), GFP_KERNEL); if (!longhaul_table) return -ENOMEM; for (j = 0; j < numscales; j++) { ratio = mults[j]; if (ratio == -1) continue; if (ratio > maxmult || ratio < minmult) continue; longhaul_table[k].frequency = calc_speed(ratio); longhaul_table[k].index = j; k++; } if (k <= 1) { kfree(longhaul_table); return -ENODEV; } /* Sort */ for (j = 0; j < k - 1; j++) { unsigned int min_f, min_i; min_f = longhaul_table[j].frequency; min_i = j; for (i = j + 1; i < k; i++) { if (longhaul_table[i].frequency < min_f) { min_f = longhaul_table[i].frequency; min_i = i; } } if (min_i != j) { swap(longhaul_table[j].frequency, longhaul_table[min_i].frequency); swap(longhaul_table[j].index, longhaul_table[min_i].index); } } longhaul_table[k].frequency = CPUFREQ_TABLE_END; /* Find index we are running on */ for (j = 0; j < k; j++) { if (mults[longhaul_table[j].index & 0x1f] == mult) { longhaul_index = j; break; } } return 0; } static void __cpuinit longhaul_setup_voltagescaling(void) { union msr_longhaul longhaul; struct mV_pos minvid, maxvid, vid; unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); if (!(longhaul.bits.RevisionID & 1)) { 
printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n"); return; } if (!longhaul.bits.VRMRev) { printk(KERN_INFO PFX "VRM 8.5\n"); vrm_mV_table = &vrm85_mV[0]; mV_vrm_table = &mV_vrm85[0]; } else { printk(KERN_INFO PFX "Mobile VRM\n"); if (cpu_model < CPU_NEHEMIAH) return; vrm_mV_table = &mobilevrm_mV[0]; mV_vrm_table = &mV_mobilevrm[0]; } minvid = vrm_mV_table[longhaul.bits.MinimumVID]; maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " "Voltage scaling disabled.\n", minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000); return; } if (minvid.mV == maxvid.mV) { printk(KERN_INFO PFX "Claims to support voltage scaling but " "min & max are both %d.%03d. " "Voltage scaling disabled\n", maxvid.mV/1000, maxvid.mV%1000); return; } /* How many voltage steps*/ numvscales = maxvid.pos - minvid.pos + 1; printk(KERN_INFO PFX "Max VID=%d.%03d " "Min VID=%d.%03d, " "%d possible voltage scales\n", maxvid.mV/1000, maxvid.mV%1000, minvid.mV/1000, minvid.mV%1000, numvscales); /* Calculate max frequency at min voltage */ j = longhaul.bits.MinMHzBR; if (longhaul.bits.MinMHzBR4) j += 16; min_vid_speed = eblcr[j]; if (min_vid_speed == -1) return; switch (longhaul.bits.MinMHzFSB) { case 0: min_vid_speed *= 13333; break; case 1: min_vid_speed *= 10000; break; case 3: min_vid_speed *= 6666; break; default: return; break; } if (min_vid_speed >= highest_speed) return; /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; j = 0; while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { speed = longhaul_table[j].frequency; if (speed > min_vid_speed) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; else pos = minvid.pos; longhaul_table[j].index |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV); j++; } 
can_scale_voltage = 1; printk(KERN_INFO PFX "Voltage scaling enabled.\n"); } static int longhaul_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, longhaul_table); } static int longhaul_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int table_index = 0; unsigned int i; unsigned int dir = 0; u8 vid, current_vid; if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) return -EINVAL; /* Don't set same frequency again */ if (longhaul_index == table_index) return 0; if (!can_scale_voltage) longhaul_setstate(table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. Both * "ondemand" and "userspace" are unsafe. C7 is doing * this in hardware, C3 is old and we need to do this * in software. */ i = longhaul_index; current_vid = (longhaul_table[longhaul_index].index >> 8); current_vid &= 0x1f; if (table_index > longhaul_index) dir = 1; while (i != table_index) { vid = (longhaul_table[i].index >> 8) & 0x1f; if (vid != current_vid) { longhaul_setstate(i); current_vid = vid; msleep(200); } if (dir) i++; else i--; } longhaul_setstate(table_index); } longhaul_index = table_index; return 0; } static unsigned int longhaul_get(unsigned int cpu) { if (cpu) return 0; return calc_speed(longhaul_get_cpu_mult()); } static acpi_status longhaul_walk_callback(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_device *d; if (acpi_bus_get_device(obj_handle, &d)) return 0; *return_value = acpi_driver_data(d); return 1; } /* VIA don't support PM2 reg, but have something similar */ static int enable_arbiter_disable(void) { struct pci_dev *dev; int status = 1; int reg; u8 pci_cmd; /* Find PLE133 host bridge */ reg = 0x78; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL); /* Find PM133/VT8605 host bridge */ if (dev == NULL) dev = 
pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8605_0, NULL); /* Find CLE266 host bridge */ if (dev == NULL) { reg = 0x76; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); /* Find CN400 V-Link host bridge */ if (dev == NULL) dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL); } if (dev != NULL) { /* Enable access to port 0x22 */ pci_read_config_byte(dev, reg, &pci_cmd); if (!(pci_cmd & 1<<7)) { pci_cmd |= 1<<7; pci_write_config_byte(dev, reg, pci_cmd); pci_read_config_byte(dev, reg, &pci_cmd); if (!(pci_cmd & 1<<7)) { printk(KERN_ERR PFX "Can't enable access to port 0x22.\n"); status = 0; } } pci_dev_put(dev); return status; } return 0; } static int longhaul_setup_southbridge(void) { struct pci_dev *dev; u8 pci_cmd; /* Find VT8235 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); if (dev == NULL) /* Find VT8237 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, NULL); if (dev != NULL) { /* Set transition time to max */ pci_read_config_byte(dev, 0xec, &pci_cmd); pci_cmd &= ~(1 << 2); pci_write_config_byte(dev, 0xec, pci_cmd); pci_read_config_byte(dev, 0xe4, &pci_cmd); pci_cmd &= ~(1 << 7); pci_write_config_byte(dev, 0xe4, pci_cmd); pci_read_config_byte(dev, 0xe5, &pci_cmd); pci_cmd |= 1 << 7; pci_write_config_byte(dev, 0xe5, pci_cmd); /* Get address of ACPI registers block*/ pci_read_config_byte(dev, 0x81, &pci_cmd); if (pci_cmd & 1 << 7) { pci_read_config_dword(dev, 0x88, &acpi_regs_addr); acpi_regs_addr &= 0xff00; printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr); } pci_dev_put(dev); return 1; } return 0; } static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); char *cpuname = NULL; int ret; u32 lo, hi; /* Check what we have on this motherboard */ switch (c->x86_model) { case 6: cpu_model = CPU_SAMUEL; cpuname = "C3 'Samuel' [C5A]"; longhaul_version = TYPE_LONGHAUL_V1; memcpy(mults, samuel1_mults, 
sizeof(samuel1_mults)); memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr)); break; case 7: switch (c->x86_mask) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; /* Note, this is not a typo, early Samuel2's had * Samuel1 ratios. */ memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr)); break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V2; if (c->x86_mask < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { cpu_model = CPU_EZRA; cpuname = "C3 'Ezra' [C5C]"; } memcpy(mults, ezra_mults, sizeof(ezra_mults)); memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr)); break; } break; case 8: cpu_model = CPU_EZRA_T; cpuname = "C3 'Ezra-T' [C5M]"; longhaul_version = TYPE_POWERSAVER; numscales = 32; memcpy(mults, ezrat_mults, sizeof(ezrat_mults)); memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr)); break; case 9: longhaul_version = TYPE_POWERSAVER; numscales = 32; memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); switch (c->x86_mask) { case 0 ... 1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; break; case 2 ... 4: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah B' [C5XLOH]"; break; case 5 ... 15: cpu_model = CPU_NEHEMIAH_C; cpuname = "C3 'Nehemiah C' [C5P]"; break; } break; default: cpuname = "Unknown"; break; } /* Check Longhaul ver. 2 */ if (longhaul_version == TYPE_LONGHAUL_V2) { rdmsr(MSR_VIA_LONGHAUL, lo, hi); if (lo == 0 && hi == 0) /* Looks like MSR isn't present */ longhaul_version = TYPE_LONGHAUL_V1; } printk(KERN_INFO PFX "VIA %s CPU detected. 
", cpuname); switch (longhaul_version) { case TYPE_LONGHAUL_V1: case TYPE_LONGHAUL_V2: printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version); break; case TYPE_POWERSAVER: printk(KERN_CONT "Powersaver supported.\n"); break; }; /* Doesn't hurt */ longhaul_setup_southbridge(); /* Find ACPI data for processor */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, &longhaul_walk_callback, NULL, NULL, (void *)&pr); /* Check ACPI support for C3 state */ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { cx = &pr->power.states[ACPI_STATE_C3]; if (cx->address > 0 && cx->latency <= 1000) longhaul_flags |= USE_ACPI_C3; } /* Disable if it isn't working */ if (disable_acpi_c3) longhaul_flags &= ~USE_ACPI_C3; /* Check if northbridge is friendly */ if (enable_arbiter_disable()) longhaul_flags |= USE_NORTHBRIDGE; /* Check ACPI support for bus master arbiter disable */ if (!(longhaul_flags & USE_ACPI_C3 || longhaul_flags & USE_NORTHBRIDGE) && ((pr == NULL) || !(pr->flags.bm_control))) { printk(KERN_ERR PFX "No ACPI support. 
Unsupported northbridge.\n"); return -ENODEV; } if (longhaul_flags & USE_NORTHBRIDGE) printk(KERN_INFO PFX "Using northbridge support.\n"); if (longhaul_flags & USE_ACPI_C3) printk(KERN_INFO PFX "Using ACPI support.\n"); ret = longhaul_get_ranges(); if (ret != 0) return ret; if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) longhaul_setup_voltagescaling(); policy->cpuinfo.transition_latency = 200000; /* nsec */ policy->cur = calc_speed(longhaul_get_cpu_mult()); ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); if (ret) return ret; cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); return 0; } static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr *longhaul_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver longhaul_driver = { .verify = longhaul_verify, .target = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, .exit = __devexit_p(longhaul_cpu_exit), .name = "longhaul", .owner = THIS_MODULE, .attr = longhaul_attr, }; static int __init longhaul_init(void) { struct cpuinfo_x86 *c = &cpu_data(0); if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) return -ENODEV; #ifdef CONFIG_SMP if (num_online_cpus() > 1) { printk(KERN_ERR PFX "More than 1 CPU detected, " "longhaul disabled.\n"); return -ENODEV; } #endif #ifdef CONFIG_X86_IO_APIC if (cpu_has_apic) { printk(KERN_ERR PFX "APIC detected. Longhaul is currently " "broken in this configuration.\n"); return -ENODEV; } #endif switch (c->x86_model) { case 6 ... 
9: return cpufreq_register_driver(&longhaul_driver); case 10: printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); default: ; } return -ENODEV; } static void __exit longhaul_exit(void) { int i; for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { longhaul_setstate(i); break; } } cpufreq_unregister_driver(&longhaul_driver); kfree(longhaul_table); } /* Even if BIOS is exporting ACPI C3 state, and it is used * with success when CPU is idle, this state doesn't * trigger frequency transition in some cases. */ module_param(disable_acpi_c3, int, 0644); MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); /* Change CPU voltage with frequency. Very useful to save * power, but most VIA C3 processors aren't supporting it. */ module_param(scale_voltage, int, 0644); MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); /* Force revision key to 0 for processors which doesn't * support voltage scaling, but are introducing itself as * such. */ module_param(revid_errata, int, 0644); MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors."); MODULE_LICENSE("GPL"); late_initcall(longhaul_init); module_exit(longhaul_exit);
gpl-2.0
bilalliberty/SebastianFM-kernel
drivers/rtc/rtc-max8925.c
4854
8827
/*
 * RTC driver for Maxim MAX8925
 *
 * Copyright (C) 2009-2010 Marvell International Ltd.
 *      Haojian Zhuang <haojian.zhuang@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/mfd/max8925.h>

/* Offsets of the time fields inside an 8-byte register dump; the order
 * matches the MAX8925_RTC_* register map below, so one bulk read/write
 * covers a full timestamp. */
enum {
	RTC_SEC = 0,
	RTC_MIN,
	RTC_HOUR,
	RTC_WEEKDAY,
	RTC_DATE,
	RTC_MONTH,
	RTC_YEAR1,
	RTC_YEAR2,
};

/* RTC time-of-day registers (BCD encoded) */
#define MAX8925_RTC_SEC			0x00
#define MAX8925_RTC_MIN			0x01
#define MAX8925_RTC_HOUR		0x02
#define MAX8925_RTC_WEEKDAY		0x03
#define MAX8925_RTC_DATE		0x04
#define MAX8925_RTC_MONTH		0x05
#define MAX8925_RTC_YEAR1		0x06
#define MAX8925_RTC_YEAR2		0x07
/* Alarm 0 registers — same 8-byte layout as the time registers */
#define MAX8925_ALARM0_SEC		0x08
#define MAX8925_ALARM0_MIN		0x09
#define MAX8925_ALARM0_HOUR		0x0a
#define MAX8925_ALARM0_WEEKDAY		0x0b
#define MAX8925_ALARM0_DATE		0x0c
#define MAX8925_ALARM0_MON		0x0d
#define MAX8925_ALARM0_YEAR1		0x0e
#define MAX8925_ALARM0_YEAR2		0x0f
/* Alarm 1 registers — unused by this driver */
#define MAX8925_ALARM1_SEC		0x10
#define MAX8925_ALARM1_MIN		0x11
#define MAX8925_ALARM1_HOUR		0x12
#define MAX8925_ALARM1_WEEKDAY		0x13
#define MAX8925_ALARM1_DATE		0x14
#define MAX8925_ALARM1_MON		0x15
#define MAX8925_ALARM1_YEAR1		0x16
#define MAX8925_ALARM1_YEAR2		0x17
#define MAX8925_RTC_CNTL		0x1b
#define MAX8925_RTC_STATUS		0x20

#define TIME_NUM			8	/* bytes in one timestamp */
#define ALARM_1SEC			(1 << 7)
#define HOUR_12				(1 << 7)	/* 12-hour mode flag */
#define HOUR_AM_PM			(1 << 5)	/* PM flag in 12-hour mode */
#define ALARM0_IRQ			(1 << 3)
#define ALARM1_IRQ			(1 << 2)
#define ALARM0_STATUS			(1 << 2)
#define ALARM1_STATUS			(1 << 1)

/* Per-device driver state. */
struct max8925_rtc_info {
	struct rtc_device	*rtc_dev;	/* registered RTC class device */
	struct max8925_chip	*chip;		/* parent MFD chip */
	struct i2c_client	*rtc;		/* i2c client of the RTC block */
	struct device		*dev;
};

/*
 * Threaded IRQ handler for the ALARM0 interrupt: disable the alarm
 * compare bits so the alarm fires once, then notify the RTC core.
 */
static irqreturn_t rtc_update_handler(int irq, void *data)
{
	struct max8925_rtc_info *info = (struct max8925_rtc_info *)data;

	/* disable ALARM0 except for 1SEC alarm */
	max8925_set_bits(info->rtc, MAX8925_ALARM0_CNTL, 0x7f, 0);
	rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
	return IRQ_HANDLED;
}

/*
 * Decode an 8-byte BCD register dump (layout per the RTC_* enum) into
 * struct rtc_time.  Inverse of data_calc().
 *
 * NOTE(review): tm_mon is stored here as the raw register value while
 * struct rtc_time convention is months 0-11; data_calc() mirrors this,
 * so driver-internal round-trips are consistent, but confirm the
 * hardware month base against the datasheet/userspace expectations.
 */
static int tm_calc(struct rtc_time *tm, unsigned char *buf, int len)
{
	if (len < TIME_NUM)
		return -EINVAL;
	/* Year is split over two registers, one BCD digit per nibble. */
	tm->tm_year = (buf[RTC_YEAR2] >> 4) * 1000
		+ (buf[RTC_YEAR2] & 0xf) * 100
		+ (buf[RTC_YEAR1] >> 4) * 10
		+ (buf[RTC_YEAR1] & 0xf);
	tm->tm_year -= 1900;		/* rtc_time counts years from 1900 */
	tm->tm_mon = ((buf[RTC_MONTH] >> 4) & 0x01) * 10
		+ (buf[RTC_MONTH] & 0x0f);
	tm->tm_mday = ((buf[RTC_DATE] >> 4) & 0x03) * 10
		+ (buf[RTC_DATE] & 0x0f);
	tm->tm_wday = buf[RTC_WEEKDAY] & 0x07;
	if (buf[RTC_HOUR] & HOUR_12) {
		/* 12-hour mode: PM flag adds 12.
		 * NOTE(review): 12AM/12PM wrap-around looks unhandled here —
		 * confirm against the MAX8925 datasheet. */
		tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x1) * 10
			+ (buf[RTC_HOUR] & 0x0f);
		if (buf[RTC_HOUR] & HOUR_AM_PM)
			tm->tm_hour += 12;
	} else
		tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x03) * 10
			+ (buf[RTC_HOUR] & 0x0f);
	tm->tm_min = ((buf[RTC_MIN] >> 4) & 0x7) * 10
		+ (buf[RTC_MIN] & 0x0f);
	tm->tm_sec = ((buf[RTC_SEC] >> 4) & 0x7) * 10
		+ (buf[RTC_SEC] & 0x0f);
	return 0;
}

/*
 * Encode struct rtc_time into the 8-byte BCD register layout (always
 * 24-hour format).  Inverse of tm_calc().  For each field the tens
 * digit goes in the high nibble, the ones digit in the low nibble.
 */
static int data_calc(unsigned char *buf, struct rtc_time *tm, int len)
{
	unsigned char high, low;

	if (len < TIME_NUM)
		return -EINVAL;
	high = (tm->tm_year + 1900) / 1000;
	low = (tm->tm_year + 1900) / 100;
	low = low - high * 10;
	buf[RTC_YEAR2] = (high << 4) + low;
	high = (tm->tm_year + 1900) / 10;
	low = tm->tm_year + 1900;
	low = low - high * 10;
	high = high - (high / 10) * 10;
	buf[RTC_YEAR1] = (high << 4) + low;
	high = tm->tm_mon / 10;
	low = tm->tm_mon;
	low = low - high * 10;
	buf[RTC_MONTH] = (high << 4) + low;
	high = tm->tm_mday / 10;
	low = tm->tm_mday;
	low = low - high * 10;
	buf[RTC_DATE] = (high << 4) + low;
	buf[RTC_WEEKDAY] = tm->tm_wday;
	high = tm->tm_hour / 10;
	low = tm->tm_hour;
	low = low - high * 10;
	buf[RTC_HOUR] = (high << 4) + low;
	high = tm->tm_min / 10;
	low = tm->tm_min;
	low = low - high * 10;
	buf[RTC_MIN] = (high << 4) + low;
	high = tm->tm_sec / 10;
	low = tm->tm_sec;
	low = low - high * 10;
	buf[RTC_SEC] = (high << 4) + low;
	return 0;
}

/* Read the current time: bulk-read all 8 time registers, then decode. */
static int max8925_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct max8925_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[TIME_NUM];
	int ret;

	ret = max8925_bulk_read(info->rtc, MAX8925_RTC_SEC, TIME_NUM, buf);
	if (ret < 0)
		goto out;
	ret = tm_calc(tm, buf, TIME_NUM);
out:
	return ret;
}

/* Set the current time: encode to BCD, then bulk-write all 8 registers. */
static int max8925_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct max8925_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[TIME_NUM];
	int ret;

	ret = data_calc(buf, tm, TIME_NUM);
	if (ret < 0)
		goto out;
	ret = max8925_bulk_write(info->rtc, MAX8925_RTC_SEC, TIME_NUM, buf);
out:
	return ret;
}

/*
 * Read ALARM0: decode its time registers, then derive
 *  - enabled: IRQ must be unmasked AND at least one compare bit set;
 *  - pending: from the ALARM0 bit in the RTC status register.
 */
static int max8925_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct max8925_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[TIME_NUM];
	int ret;

	ret = max8925_bulk_read(info->rtc, MAX8925_ALARM0_SEC, TIME_NUM, buf);
	if (ret < 0)
		goto out;
	ret = tm_calc(&alrm->time, buf, TIME_NUM);
	if (ret < 0)
		goto out;
	ret = max8925_reg_read(info->rtc, MAX8925_RTC_IRQ_MASK);
	if (ret < 0)
		goto out;
	if (ret & ALARM0_IRQ) {
		/* interrupt masked -> alarm effectively disabled */
		alrm->enabled = 0;
	} else {
		ret = max8925_reg_read(info->rtc, MAX8925_ALARM0_CNTL);
		if (ret < 0)
			goto out;
		if (!ret)
			alrm->enabled = 0;
		else
			alrm->enabled = 1;
	}
	ret = max8925_reg_read(info->rtc, MAX8925_RTC_STATUS);
	if (ret < 0)
		goto out;
	if (ret & ALARM0_STATUS)
		alrm->pending = 1;
	else
		alrm->pending = 0;
	return 0;
out:
	return ret;
}

/*
 * Program ALARM0: write the alarm time, then set the compare-enable
 * mask (0x77 = match on sec/min/hour/day/month/year) or clear it to
 * disable the alarm.
 */
static int max8925_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct max8925_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[TIME_NUM];
	int ret;

	ret = data_calc(buf, &alrm->time, TIME_NUM);
	if (ret < 0)
		goto out;
	ret = max8925_bulk_write(info->rtc, MAX8925_ALARM0_SEC, TIME_NUM, buf);
	if (ret < 0)
		goto out;
	if (alrm->enabled)
		/* only enable alarm on year/month/day/hour/min/sec */
		ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x77);
	else
		ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x0);
	if (ret < 0)
		goto out;
out:
	return ret;
}

/* Operations exposed to the RTC class core. */
static const struct rtc_class_ops max8925_rtc_ops = {
	.read_time	= max8925_rtc_read_time,
	.set_time	= max8925_rtc_set_time,
	.read_alarm	= max8925_rtc_read_alarm,
	.set_alarm	= max8925_rtc_set_alarm,
};

/*
 * Probe: allocate state, request the ALARM0 IRQ from the parent MFD,
 * mark the device wakeup-capable, and register with the RTC class.
 *
 * NOTE(review): the IRQ is requested (and thus enabled) before
 * info->rtc_dev is registered; an alarm firing in that window would
 * make rtc_update_handler() call rtc_update_irq() on an invalid
 * rtc_dev — confirm the alarm is guaranteed masked at probe time.
 */
static int __devinit max8925_rtc_probe(struct platform_device *pdev)
{
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct max8925_rtc_info *info;
	int irq, ret;

	info = kzalloc(sizeof(struct max8925_rtc_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->chip = chip;
	info->rtc = chip->rtc;
	info->dev = &pdev->dev;
	irq = chip->irq_base + MAX8925_IRQ_RTC_ALARM0;

	ret = request_threaded_irq(irq, NULL, rtc_update_handler,
				   IRQF_ONESHOT, "rtc-alarm0", info);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
			irq, ret);
		goto out_irq;
	}

	dev_set_drvdata(&pdev->dev, info);
	/* XXX - isn't this redundant? */
	platform_set_drvdata(pdev, info);

	device_init_wakeup(&pdev->dev, 1);

	info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev,
					&max8925_rtc_ops, THIS_MODULE);
	ret = PTR_ERR(info->rtc_dev);
	if (IS_ERR(info->rtc_dev)) {
		dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
		goto out_rtc;
	}

	return 0;
out_rtc:
	platform_set_drvdata(pdev, NULL);
	free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
out_irq:
	kfree(info);
	return ret;
}

/* Remove: free the IRQ, unregister the RTC device, release state. */
static int __devexit max8925_rtc_remove(struct platform_device *pdev)
{
	struct max8925_rtc_info *info = platform_get_drvdata(pdev);

	if (info) {
		free_irq(info->chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
		rtc_device_unregister(info->rtc_dev);
		kfree(info);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* On suspend, flag ALARM0 as a wakeup source in the parent MFD if the
 * device is allowed to wake the system. */
static int max8925_rtc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);

	if (device_may_wakeup(dev))
		chip->wakeup_flag |= 1 << MAX8925_IRQ_RTC_ALARM0;
	return 0;
}

/* On resume, clear the wakeup flag set in max8925_rtc_suspend(). */
static int max8925_rtc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);

	if (device_may_wakeup(dev))
		chip->wakeup_flag &= ~(1 << MAX8925_IRQ_RTC_ALARM0);
	return 0;
}
#endif
/* PM ops table; the suspend/resume callbacks only exist (and this only
 * carries entries) when CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(max8925_rtc_pm_ops, max8925_rtc_suspend,
			 max8925_rtc_resume);

/* Platform driver binding against the "max8925-rtc" cell of the MFD. */
static struct platform_driver max8925_rtc_driver = {
	.driver		= {
		.name	= "max8925-rtc",
		.owner	= THIS_MODULE,
		.pm	= &max8925_rtc_pm_ops,
	},
	.probe		= max8925_rtc_probe,
	.remove		= __devexit_p(max8925_rtc_remove),
};

/* Expands to the module init/exit boilerplate that registers and
 * unregisters the platform driver. */
module_platform_driver(max8925_rtc_driver);

MODULE_DESCRIPTION("Maxim MAX8925 RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_LICENSE("GPL");
gpl-2.0
EloYGomeZ/caf-j1-exp
kernel/user-return-notifier.c
5366
1353
#include <linux/user-return-notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/export.h>

/* Per-cpu list of notifiers to fire when this cpu next returns to
 * user space. */
static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);

/*
 * Request a notification when the current cpu returns to userspace.  Must be
 * called in atomic context.  The notifier will also be called in atomic
 * context.
 */
void user_return_notifier_register(struct user_return_notifier *urn)
{
	struct hlist_head *list = &__get_cpu_var(return_notifier_list);

	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
	hlist_add_head(&urn->link, list);
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);

/*
 * Removes a registered user return notifier.  Must be called from atomic
 * context, and from the same cpu registration occurred in.
 */
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
	struct hlist_head *list = &__get_cpu_var(return_notifier_list);

	hlist_del(&urn->link);
	/* Last notifier gone: no need to take the return-notify slow path. */
	if (hlist_empty(list))
		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);

/* Calls every notifier registered on this cpu; the _safe iterator allows
 * a callback to unregister itself while we walk the list. */
void fire_user_return_notifiers(void)
{
	struct user_return_notifier *entry;
	struct hlist_node *pos, *next;
	struct hlist_head *list = &get_cpu_var(return_notifier_list);

	hlist_for_each_entry_safe(entry, pos, next, list, link)
		entry->on_user_return(entry);
	put_cpu_var(return_notifier_list);
}
gpl-2.0
Snuzzo/funky_redux
drivers/usb/host/sl811_cs.c
8182
5116
/*
 * PCMCIA driver for SL811HS (as found in REX-CFU1U)
 * Filename: sl811_cs.c
 * Author: Yukio Yamamoto
 *
 * Port to sl811-hcd and 2.6.x by
 *	Botond Botyanszki <boti@rocketmail.com>
 *	Simon Pickering
 *
 * Last update: 2005-05-12
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>

#include <linux/usb/sl811.h>

MODULE_AUTHOR("Botond Botyanszki");
MODULE_DESCRIPTION("REX-CFU1U PCMCIA driver for 2.6");
MODULE_LICENSE("GPL");


/*====================================================================*/
/* MACROS                                                             */
/*====================================================================*/

#define INFO(args...) printk(KERN_INFO "sl811_cs: " args)

/*====================================================================*/
/* VARIABLES                                                          */
/*====================================================================*/

/* Per-socket driver state; only records the owning PCMCIA device. */
typedef struct local_info_t {
	struct pcmcia_device	*p_dev;
} local_info_t;

static void sl811_cs_release(struct pcmcia_device * link);

/*====================================================================*/

/* Release callback for the platform device: drop the parent link so the
 * static platform_dev below can be reused for a re-inserted card. */
static void release_platform_dev(struct device * dev)
{
	dev_dbg(dev, "sl811_cs platform_dev release\n");
	dev->parent = NULL;
}

static struct sl811_platform_data platform_data = {
	.potpg		= 100,
	.power		= 50,		/* == 100mA */
	// .reset	= ... FIXME: invoke CF reset on the card
};

/* One IRQ plus two one-byte I/O windows (address and data registers);
 * start/end values are filled in by sl811_hc_init(). */
static struct resource resources[] = {
	[0] = {
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		// .name   = "address",
		.flags	= IORESOURCE_IO,
	},
	[2] = {
		// .name   = "data",
		.flags	= IORESOURCE_IO,
	},
};

extern struct platform_driver sl811h_driver;

/* A single static platform device: this driver handles at most one card
 * at a time (sl811_hc_init() returns -EBUSY if it is already bound). */
static struct platform_device platform_dev = {
	.id			= -1,
	.dev = {
		.platform_data = &platform_data,
		.release       = release_platform_dev,
	},
	.resource		= resources,
	.num_resources		= ARRAY_SIZE(resources),
};

/* Fill in the IRQ/port resources from the PCMCIA socket and register the
 * platform device so sl811-hcd binds to it. */
static int sl811_hc_init(struct device *parent, resource_size_t base_addr,
			 int irq)
{
	if (platform_dev.dev.parent)
		return -EBUSY;
	platform_dev.dev.parent = parent;

	/* finish seting up the platform device */
	resources[0].start = irq;

	resources[1].start = base_addr;
	resources[1].end = base_addr;

	resources[2].start = base_addr + 1;
	resources[2].end = base_addr + 1;

	/* The driver core will probe for us.  We know sl811-hcd has been
	 * initialized already because of the link order dependency created
	 * by referencing "sl811h_driver".
	 */
	platform_dev.name = sl811h_driver.driver.name;
	return platform_device_register(&platform_dev);
}

/*====================================================================*/

/* PCMCIA detach: release the socket/HCD, then free the per-socket state
 * allocated in sl811_cs_probe(). */
static void sl811_cs_detach(struct pcmcia_device *link)
{
	dev_dbg(&link->dev, "sl811_cs_detach\n");

	sl811_cs_release(link);

	/* This points to the parent local_info_t struct */
	kfree(link->priv);
}

/* Disable the socket and unregister the static platform device, which
 * triggers release_platform_dev() via the driver core. */
static void sl811_cs_release(struct pcmcia_device * link)
{
	dev_dbg(&link->dev, "sl811_cs_release\n");

	pcmcia_disable_device(link);
	platform_device_unregister(&platform_dev);
}

/* pcmcia_loop_config() callback: accept any non-zero configuration index
 * whose I/O window can be requested. */
static int sl811_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
	if (p_dev->config_index == 0)
		return -EINVAL;

	return pcmcia_request_io(p_dev);
}

/*
 * Configure the socket: pick a usable configuration, require an IRQ and
 * a two-register I/O window, enable the device, then hand it to
 * sl811-hcd.  Any failure (note the goto into the if body) prints a
 * warning, releases the socket and returns -ENODEV.
 */
static int sl811_cs_config(struct pcmcia_device *link)
{
	struct device		*parent = &link->dev;
	int			ret;

	dev_dbg(&link->dev, "sl811_cs_config\n");

	link->config_flags |= CONF_ENABLE_IRQ |	CONF_AUTO_SET_VPP |
		CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO;

	if (pcmcia_loop_config(link, sl811_cs_config_check, NULL))
		goto failed;

	/* require an IRQ and two registers */
	if (resource_size(link->resource[0]) < 2)
		goto failed;

	if (!link->irq)
		goto failed;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	if (sl811_hc_init(parent, link->resource[0]->start, link->irq)
			< 0) {
failed:
		printk(KERN_WARNING "sl811_cs_config failed\n");
		sl811_cs_release(link);
		return -ENODEV;
	}
	return 0;
}

/* PCMCIA probe: allocate per-socket state and configure the card. */
static int sl811_cs_probe(struct pcmcia_device *link)
{
	local_info_t *local;

	local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
	if (!local)
		return -ENOMEM;
	local->p_dev = link;
	link->priv = local;

	return sl811_cs_config(link);
}

/* Cards this driver binds to. */
static const struct pcmcia_device_id sl811_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0xc015, 0x0001), /* RATOC USB HOST CF+ Card */
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, sl811_ids);

static struct pcmcia_driver sl811_cs_driver = {
	.owner		= THIS_MODULE,
	.name		= "sl811_cs",
	.probe		= sl811_cs_probe,
	.remove		= sl811_cs_detach,
	.id_table	= sl811_ids,
};
/*====================================================================*/

/* Module entry point: register with the PCMCIA core; actual card setup
 * happens later through the sl811_cs_driver probe callback. */
static int __init init_sl811_cs(void)
{
	return pcmcia_register_driver(&sl811_cs_driver);
}
module_init(init_sl811_cs);

/* Module exit: unregister the PCMCIA driver. */
static void __exit exit_sl811_cs(void)
{
	pcmcia_unregister_driver(&sl811_cs_driver);
}
module_exit(exit_sl811_cs);
gpl-2.0
EPDCenterSpain/kernel_odys_genio
drivers/pps/kc.c
8438
3840
/*
 * PPS kernel consumer API
 *
 * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pps_kernel.h>

#include "kc.h"

/*
 * Global variables
 */

/* state variables to bind kernel consumer */
/* Protects the two pps_kc_hardpps_* variables below; taken with irqs
 * disabled because pps_kc_event() runs from (hard) irq context. */
DEFINE_SPINLOCK(pps_kc_hardpps_lock);
/* PPS API (RFC 2783): current source and mode for kernel consumer */
struct pps_device *pps_kc_hardpps_dev;	/* unique pointer to device */
int pps_kc_hardpps_mode;		/* mode bits for kernel consumer */

/* pps_kc_bind - control PPS kernel consumer binding
 * @pps: the PPS source
 * @bind_args: kernel consumer bind parameters
 *
 * This function is used to bind or unbind PPS kernel consumer according to
 * supplied parameters. Should not be called in interrupt context.
 */
int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
{
	/* Check if another consumer is already bound */
	spin_lock_irq(&pps_kc_hardpps_lock);

	/* bind_args->edge == 0 requests an unbind; any other value binds
	 * @pps with that edge mask.  Every branch below releases the lock
	 * exactly once before returning or printing. */
	if (bind_args->edge == 0)
		if (pps_kc_hardpps_dev == pps) {
			pps_kc_hardpps_mode = 0;
			pps_kc_hardpps_dev = NULL;
			spin_unlock_irq(&pps_kc_hardpps_lock);
			dev_info(pps->dev, "unbound kernel"
					" consumer\n");
		} else {
			/* caller asked to unbind a source that isn't the
			 * currently bound one */
			spin_unlock_irq(&pps_kc_hardpps_lock);
			dev_err(pps->dev, "selected kernel consumer"
					" is not bound\n");
			return -EINVAL;
		}
	else
		if (pps_kc_hardpps_dev == NULL ||
				pps_kc_hardpps_dev == pps) {
			/* free slot, or rebinding the same source with a
			 * (possibly new) edge mask */
			pps_kc_hardpps_mode = bind_args->edge;
			pps_kc_hardpps_dev = pps;
			spin_unlock_irq(&pps_kc_hardpps_lock);
			dev_info(pps->dev, "bound kernel consumer: "
				"edge=0x%x\n", bind_args->edge);
		} else {
			spin_unlock_irq(&pps_kc_hardpps_lock);
			dev_err(pps->dev, "another kernel consumer"
					" is already bound\n");
			return -EINVAL;
		}

	return 0;
}

/* pps_kc_remove - unbind kernel consumer on PPS source removal
 * @pps: the PPS source
 *
 * This function is used to disable kernel consumer on PPS source removal
 * if this source was bound to PPS kernel consumer. Can be called on any
 * source safely. Should not be called in interrupt context.
 */
void pps_kc_remove(struct pps_device *pps)
{
	spin_lock_irq(&pps_kc_hardpps_lock);
	if (pps == pps_kc_hardpps_dev) {
		pps_kc_hardpps_mode = 0;
		pps_kc_hardpps_dev = NULL;
		spin_unlock_irq(&pps_kc_hardpps_lock);
		dev_info(pps->dev, "unbound kernel consumer"
				" on device removal\n");
	} else
		spin_unlock_irq(&pps_kc_hardpps_lock);
}

/* pps_kc_event - call hardpps() on PPS event
 * @pps: the PPS source
 * @ts: PPS event timestamp
 * @event: PPS event edge
 *
 * This function calls hardpps() when an event from bound PPS source occurs.
 */
void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts,
		int event)
{
	unsigned long flags;

	/* Pass some events to kernel consumer if activated */
	/* irqsave variant: this path may be reached from irq context. */
	spin_lock_irqsave(&pps_kc_hardpps_lock, flags);
	if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode)
		hardpps(&ts->ts_real, &ts->ts_raw);
	spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags);
}
gpl-2.0
k2wl/kernel.org
lib/bsearch.c
9206
1615
/*
 * A generic implementation of binary search for the Linux kernel
 *
 * Copyright (C) 2008-2009 Ksplice, Inc.
 * Author: Tim Abbott <tabbott@ksplice.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2.
 */

#include <linux/export.h>
#include <linux/bsearch.h>

/*
 * bsearch - binary search an array of elements
 * @key: pointer to item being searched for
 * @base: pointer to first element to search
 * @num: number of elements
 * @size: size of each element
 * @cmp: pointer to comparison function
 *
 * Searches [@base, @base + @num * @size) for an element that compares
 * equal to @key under @cmp, assuming the array is already sorted in
 * ascending order with respect to @cmp.  Returns a pointer to a matching
 * element, or NULL if there is none.
 *
 * The key need not share a type with the array elements: @cmp always
 * receives @key as its first argument and a pointer to an array element
 * as its second.  If key and elements do share a type, the same
 * comparator can serve both sort() and bsearch().
 */
void *bsearch(const void *key, const void *base, size_t num, size_t size,
	      int (*cmp)(const void *key, const void *elt))
{
	size_t lo = 0;
	size_t hi = num;

	/* Invariant: any match lies within the half-open range [lo, hi). */
	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const void *elt = base + mid * size;
		int order = cmp(key, elt);

		if (order == 0)
			return (void *)elt;
		if (order < 0)
			hi = mid;	/* key sorts below elt */
		else
			lo = mid + 1;	/* key sorts above elt */
	}
	return NULL;
}
EXPORT_SYMBOL(bsearch);
gpl-2.0
DirtyUnicorns/android_kernel_htc_msm8660-caf
drivers/media/video/zoran/videocodec.c
12790
9286
/* * VIDEO MOTION CODECs internal API for video devices * * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's * bound to a master device. * * (c) 2002 Wolfgang Scherr <scherr@net4you.at> * * $Id: videocodec.c,v 1.1.2.8 2003/03/29 07:16:04 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define VIDEOCODEC_VERSION "v0.2" #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> // kernel config is here (procfs flag) #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #endif #include "videocodec.h" static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) struct attached_list { struct videocodec *codec; struct attached_list *next; }; struct codec_list { const struct videocodec *codec; int attached; struct attached_list *list; struct codec_list *next; }; static struct codec_list *codeclist_top = NULL; /* ================================================= */ /* function prototypes of the master/slave interface */ /* ================================================= */ struct videocodec * videocodec_attach (struct videocodec_master *master) { struct codec_list *h = codeclist_top; struct attached_list *a, *ptr; struct videocodec *codec; int res; if (!master) { dprintk(1, KERN_ERR "videocodec_attach: no data\n"); return NULL; } dprintk(2, "videocodec_attach: '%s', flags %lx, magic %lx\n", master->name, master->flags, master->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_attach: no device available\n"); return NULL; } while (h) { // attach only if the slave has at least the flags // expected by the master if ((master->flags & h->codec->flags) == master->flags) { dprintk(4, "videocodec_attach: try '%s'\n", h->codec->name); if (!try_module_get(h->codec->owner)) return NULL; codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL); if (!codec) { dprintk(1, KERN_ERR "videocodec_attach: no mem\n"); goto out_module_put; } snprintf(codec->name, sizeof(codec->name), "%s[%d]", codec->name, h->attached); codec->master_data = master; res = codec->setup(codec); if (res == 0) { dprintk(3, "videocodec_attach '%s'\n", codec->name); ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL); if (!ptr) { dprintk(1, KERN_ERR "videocodec_attach: no memory\n"); goto out_kfree; } ptr->codec = codec; a = h->list; if (!a) { h->list = ptr; dprintk(4, "videocodec: first element\n"); } else { while (a->next) a = a->next; // find end a->next = ptr; dprintk(4, "videocodec: in after '%s'\n", h->codec->name); } h->attached += 1; return codec; } else { kfree(codec); } } h = 
h->next; } dprintk(1, KERN_ERR "videocodec_attach: no codec found!\n"); return NULL; out_module_put: module_put(h->codec->owner); out_kfree: kfree(codec); return NULL; } int videocodec_detach (struct videocodec *codec) { struct codec_list *h = codeclist_top; struct attached_list *a, *prev; int res; if (!codec) { dprintk(1, KERN_ERR "videocodec_detach: no data\n"); return -EINVAL; } dprintk(2, "videocodec_detach: '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_detach: no device left...\n"); return -ENXIO; } while (h) { a = h->list; prev = NULL; while (a) { if (codec == a->codec) { res = a->codec->unset(a->codec); if (res >= 0) { dprintk(3, "videocodec_detach: '%s'\n", a->codec->name); a->codec->master_data = NULL; } else { dprintk(1, KERN_ERR "videocodec_detach: '%s'\n", a->codec->name); a->codec->master_data = NULL; } if (prev == NULL) { h->list = a->next; dprintk(4, "videocodec: delete first\n"); } else { prev->next = a->next; dprintk(4, "videocodec: delete middle\n"); } module_put(a->codec->owner); kfree(a->codec); kfree(a); h->attached -= 1; return 0; } prev = a; a = a->next; } h = h->next; } dprintk(1, KERN_ERR "videocodec_detach: given codec not found!\n"); return -EINVAL; } int videocodec_register (const struct videocodec *codec) { struct codec_list *ptr, *h = codeclist_top; if (!codec) { dprintk(1, KERN_ERR "videocodec_register: no data!\n"); return -EINVAL; } dprintk(2, "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL); if (!ptr) { dprintk(1, KERN_ERR "videocodec_register: no memory\n"); return -ENOMEM; } ptr->codec = codec; if (!h) { codeclist_top = ptr; dprintk(4, "videocodec: hooked in as first element\n"); } else { while (h->next) h = h->next; // find the end h->next = ptr; dprintk(4, "videocodec: hooked in after '%s'\n", h->codec->name); } 
return 0; } int videocodec_unregister (const struct videocodec *codec) { struct codec_list *prev = NULL, *h = codeclist_top; if (!codec) { dprintk(1, KERN_ERR "videocodec_unregister: no data!\n"); return -EINVAL; } dprintk(2, "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_unregister: no device left...\n"); return -ENXIO; } while (h) { if (codec == h->codec) { if (h->attached) { dprintk(1, KERN_ERR "videocodec: '%s' is used\n", h->codec->name); return -EBUSY; } dprintk(3, "videocodec: unregister '%s' is ok.\n", h->codec->name); if (prev == NULL) { codeclist_top = h->next; dprintk(4, "videocodec: delete first element\n"); } else { prev->next = h->next; dprintk(4, "videocodec: delete middle element\n"); } kfree(h); return 0; } prev = h; h = h->next; } dprintk(1, KERN_ERR "videocodec_unregister: given codec not found!\n"); return -EINVAL; } #ifdef CONFIG_PROC_FS static int proc_videocodecs_show(struct seq_file *m, void *v) { struct codec_list *h = codeclist_top; struct attached_list *a; seq_printf(m, "<S>lave or attached <M>aster name type flags magic "); seq_printf(m, "(connected as)\n"); h = codeclist_top; while (h) { seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n", h->codec->name, h->codec->type, h->codec->flags, h->codec->magic); a = h->list; while (a) { seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n", a->codec->master_data->name, a->codec->master_data->type, a->codec->master_data->flags, a->codec->master_data->magic, a->codec->name); a = a->next; } h = h->next; } return 0; } static int proc_videocodecs_open(struct inode *inode, struct file *file) { return single_open(file, proc_videocodecs_show, NULL); } static const struct file_operations videocodecs_proc_fops = { .owner = THIS_MODULE, .open = proc_videocodecs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* ===================== */ /* hook in driver module */ 
/* ===================== */ static int __init videocodec_init (void) { #ifdef CONFIG_PROC_FS static struct proc_dir_entry *videocodec_proc_entry; #endif printk(KERN_INFO "Linux video codec intermediate layer: %s\n", VIDEOCODEC_VERSION); #ifdef CONFIG_PROC_FS videocodec_proc_entry = proc_create("videocodecs", 0, NULL, &videocodecs_proc_fops); if (!videocodec_proc_entry) { dprintk(1, KERN_ERR "videocodec: can't init procfs.\n"); } #endif return 0; } static void __exit videocodec_exit (void) { #ifdef CONFIG_PROC_FS remove_proc_entry("videocodecs", NULL); #endif } EXPORT_SYMBOL(videocodec_attach); EXPORT_SYMBOL(videocodec_detach); EXPORT_SYMBOL(videocodec_register); EXPORT_SYMBOL(videocodec_unregister); module_init(videocodec_init); module_exit(videocodec_exit); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Intermediate API module for video codecs " VIDEOCODEC_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
ls2uper/linux
drivers/tty/serial/pxa.c
759
23265
/* * Based on drivers/serial/8250.c by Russell King. * * Author: Nicolas Pitre * Created: Feb 20, 2003 * Copyright: (C) 2003 Monta Vista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Note 1: This driver is made separate from the already too overloaded * 8250.c because it needs some kirks of its own and that'll make it * easier to add DMA support. * * Note 2: I'm too sick of device allocation policies for serial ports. * If someone else wants to request an "official" allocation of major/minor * for this driver please be my guest. And don't forget that new hardware * to come from Intel might have more than 3 or 4 of those UARTs. Let's * hope for a better port registration and dynamic device allocation scheme * with the serial core maintainer satisfaction to appear soon. 
*/ #if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/serial_reg.h> #include <linux/circ_buf.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #define PXA_NAME_LEN 8 struct uart_pxa_port { struct uart_port port; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned int lsr_break_flag; struct clk *clk; char name[PXA_NAME_LEN]; }; static inline unsigned int serial_in(struct uart_pxa_port *up, int offset) { offset <<= 2; return readl(up->port.membase + offset); } static inline void serial_out(struct uart_pxa_port *up, int offset, int value) { offset <<= 2; writel(value, up->port.membase + offset); } static void serial_pxa_enable_ms(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; up->ier |= UART_IER_MSI; serial_out(up, UART_IER, up->ier); } static void serial_pxa_stop_tx(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; if (up->ier & UART_IER_THRI) { up->ier &= ~UART_IER_THRI; serial_out(up, UART_IER, up->ier); } } static void serial_pxa_stop_rx(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; up->ier &= ~UART_IER_RLSI; up->port.read_status_mask &= ~UART_LSR_DR; serial_out(up, UART_IER, up->ier); } static inline void receive_chars(struct uart_pxa_port *up, int *status) { unsigned int ch, flag; int max_count = 256; do { /* work around Errata #20 according to * Intel(R) PXA27x Processor Family * Specification Update (May 2005) * * Step 2 * Disable the Reciever Time Out Interrupt via IER[RTOEI] */ up->ier &= ~UART_IER_RTOIE; serial_out(up, UART_IER, up->ier); 
ch = serial_in(up, UART_RX); flag = TTY_NORMAL; up->port.icount.rx++; if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE | UART_LSR_OE))) { /* * For statistics only */ if (*status & UART_LSR_BI) { *status &= ~(UART_LSR_FE | UART_LSR_PE); up->port.icount.brk++; /* * We do the SysRQ and SAK checking * here because otherwise the break * may get masked by ignore_status_mask * or read_status_mask. */ if (uart_handle_break(&up->port)) goto ignore_char; } else if (*status & UART_LSR_PE) up->port.icount.parity++; else if (*status & UART_LSR_FE) up->port.icount.frame++; if (*status & UART_LSR_OE) up->port.icount.overrun++; /* * Mask off conditions which should be ignored. */ *status &= up->port.read_status_mask; #ifdef CONFIG_SERIAL_PXA_CONSOLE if (up->port.line == up->port.cons->index) { /* Recover the break flag from console xmit */ *status |= up->lsr_break_flag; up->lsr_break_flag = 0; } #endif if (*status & UART_LSR_BI) { flag = TTY_BREAK; } else if (*status & UART_LSR_PE) flag = TTY_PARITY; else if (*status & UART_LSR_FE) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&up->port, ch)) goto ignore_char; uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); ignore_char: *status = serial_in(up, UART_LSR); } while ((*status & UART_LSR_DR) && (max_count-- > 0)); tty_flip_buffer_push(&up->port.state->port); /* work around Errata #20 according to * Intel(R) PXA27x Processor Family * Specification Update (May 2005) * * Step 6: * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE] */ up->ier |= UART_IER_RTOIE; serial_out(up, UART_IER, up->ier); } static void transmit_chars(struct uart_pxa_port *up) { struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { serial_out(up, UART_TX, up->port.x_char); up->port.icount.tx++; up->port.x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { serial_pxa_stop_tx(&up->port); return; } count = up->port.fifosize / 2; do { serial_out(up, UART_TX, 
xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (uart_circ_empty(xmit)) serial_pxa_stop_tx(&up->port); } static void serial_pxa_start_tx(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; if (!(up->ier & UART_IER_THRI)) { up->ier |= UART_IER_THRI; serial_out(up, UART_IER, up->ier); } } /* should hold up->port.lock */ static inline void check_modem_status(struct uart_pxa_port *up) { int status; status = serial_in(up, UART_MSR); if ((status & UART_MSR_ANY_DELTA) == 0) return; if (status & UART_MSR_TERI) up->port.icount.rng++; if (status & UART_MSR_DDSR) up->port.icount.dsr++; if (status & UART_MSR_DDCD) uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); wake_up_interruptible(&up->port.state->port.delta_msr_wait); } /* * This handles the interrupt from one port. */ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id) { struct uart_pxa_port *up = dev_id; unsigned int iir, lsr; iir = serial_in(up, UART_IIR); if (iir & UART_IIR_NO_INT) return IRQ_NONE; spin_lock(&up->port.lock); lsr = serial_in(up, UART_LSR); if (lsr & UART_LSR_DR) receive_chars(up, &lsr); check_modem_status(up); if (lsr & UART_LSR_THRE) transmit_chars(up); spin_unlock(&up->port.lock); return IRQ_HANDLED; } static unsigned int serial_pxa_tx_empty(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; unsigned int ret; spin_lock_irqsave(&up->port.lock, flags); ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; spin_unlock_irqrestore(&up->port.lock, flags); return ret; } static unsigned int serial_pxa_get_mctrl(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char status; unsigned int ret; status = serial_in(up, UART_MSR); ret = 0; if (status & UART_MSR_DCD) ret |= TIOCM_CAR; if (status & UART_MSR_RI) ret |= TIOCM_RNG; if (status & UART_MSR_DSR) ret |= TIOCM_DSR; if (status & UART_MSR_CTS) ret |= TIOCM_CTS; return ret; } static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char mcr = 0; if (mctrl & TIOCM_RTS) mcr |= UART_MCR_RTS; if (mctrl & TIOCM_DTR) mcr |= UART_MCR_DTR; if (mctrl & TIOCM_OUT1) mcr |= UART_MCR_OUT1; if (mctrl & TIOCM_OUT2) mcr |= UART_MCR_OUT2; if (mctrl & TIOCM_LOOP) mcr |= UART_MCR_LOOP; mcr |= up->mcr; serial_out(up, UART_MCR, mcr); } static void serial_pxa_break_ctl(struct uart_port *port, int break_state) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); if (break_state == -1) up->lcr |= UART_LCR_SBC; else up->lcr &= ~UART_LCR_SBC; serial_out(up, UART_LCR, up->lcr); spin_unlock_irqrestore(&up->port.lock, flags); } static int serial_pxa_startup(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; int retval; if (port->line == 3) /* HWUART */ up->mcr |= UART_MCR_AFE; else up->mcr = 0; up->port.uartclk = clk_get_rate(up->clk); /* * Allocate the IRQ */ retval = request_irq(up->port.irq, serial_pxa_irq, 0, up->name, up); if (retval) return retval; /* * Clear the FIFO buffers and disable them. * (they will be reenabled in set_termios()) */ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_FCR, 0); /* * Clear the interrupt registers. 
*/ (void) serial_in(up, UART_LSR); (void) serial_in(up, UART_RX); (void) serial_in(up, UART_IIR); (void) serial_in(up, UART_MSR); /* * Now, initialize the UART */ serial_out(up, UART_LCR, UART_LCR_WLEN8); spin_lock_irqsave(&up->port.lock, flags); up->port.mctrl |= TIOCM_OUT2; serial_pxa_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* * Finally, enable interrupts. Note: Modem status interrupts * are set via set_termios(), which will be occurring imminently * anyway, so we don't enable them here. */ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE; serial_out(up, UART_IER, up->ier); /* * And clear the interrupt registers again for luck. */ (void) serial_in(up, UART_LSR); (void) serial_in(up, UART_RX); (void) serial_in(up, UART_IIR); (void) serial_in(up, UART_MSR); return 0; } static void serial_pxa_shutdown(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned long flags; free_irq(up->port.irq, up); /* * Disable interrupts from this port */ up->ier = 0; serial_out(up, UART_IER, 0); spin_lock_irqsave(&up->port.lock, flags); up->port.mctrl &= ~TIOCM_OUT2; serial_pxa_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* * Disable break condition and FIFOs */ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_FCR, 0); } static void serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; unsigned int dll; switch (termios->c_cflag & CSIZE) { case CS5: cval = UART_LCR_WLEN5; break; case CS6: cval = UART_LCR_WLEN6; break; case CS7: cval = UART_LCR_WLEN7; break; default: case CS8: cval = UART_LCR_WLEN8; break; } if (termios->c_cflag & CSTOPB) cval 
|= UART_LCR_STOP; if (termios->c_cflag & PARENB) cval |= UART_LCR_PARITY; if (!(termios->c_cflag & PARODD)) cval |= UART_LCR_EPAR; /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); if ((up->port.uartclk / quot) < (2400 * 16)) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1; else if ((up->port.uartclk / quot) < (230400 * 16)) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8; else fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32; /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&up->port.lock, flags); /* * Ensure the port will be enabled. * This is required especially for serial console. */ up->ier |= UART_IER_UUE; /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; if (termios->c_iflag & INPCK) up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) up->port.read_status_mask |= UART_LSR_BI; /* * Characters to ignore */ up->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; if (termios->c_iflag & IGNBRK) { up->port.ignore_status_mask |= UART_LSR_BI; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). 
*/ if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_OE; } /* * ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= UART_LSR_DR; /* * CTS flow control flag and modem status interrupts */ up->ier &= ~UART_IER_MSI; if (UART_ENABLE_MS(&up->port, termios->c_cflag)) up->ier |= UART_IER_MSI; serial_out(up, UART_IER, up->ier); if (termios->c_cflag & CRTSCTS) up->mcr |= UART_MCR_AFE; else up->mcr &= ~UART_MCR_AFE; serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ /* * work around Errata #75 according to Intel(R) PXA27x Processor Family * Specification Update (Nov 2005) */ dll = serial_in(up, UART_DLL); WARN_ON(dll != (quot & 0xff)); serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */ serial_out(up, UART_LCR, cval); /* reset DLAB */ up->lcr = cval; /* Save LCR */ serial_pxa_set_mctrl(&up->port, up->port.mctrl); serial_out(up, UART_FCR, fcr); spin_unlock_irqrestore(&up->port.lock, flags); } static void serial_pxa_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; if (!state) clk_prepare_enable(up->clk); else clk_disable_unprepare(up->clk); } static void serial_pxa_release_port(struct uart_port *port) { } static int serial_pxa_request_port(struct uart_port *port) { return 0; } static void serial_pxa_config_port(struct uart_port *port, int flags) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; up->port.type = PORT_PXA; } static int serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser) { /* we don't want the core code to modify any port params */ return -EINVAL; } static const char * serial_pxa_type(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; return up->name; } static struct uart_pxa_port *serial_pxa_ports[4]; static struct uart_driver serial_pxa_reg; #ifdef 
CONFIG_SERIAL_PXA_CONSOLE #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) /* * Wait for transmitter & holding register to empty */ static inline void wait_for_xmitr(struct uart_pxa_port *up) { unsigned int status, tmout = 10000; /* Wait up to 10ms for the character(s) to be sent. */ do { status = serial_in(up, UART_LSR); if (status & UART_LSR_BI) up->lsr_break_flag = UART_LSR_BI; if (--tmout == 0) break; udelay(1); } while ((status & BOTH_EMPTY) != BOTH_EMPTY); /* Wait up to 1s for flow control if necessary */ if (up->port.flags & UPF_CONS_FLOW) { tmout = 1000000; while (--tmout && ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) udelay(1); } } static void serial_pxa_console_putchar(struct uart_port *port, int ch) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; wait_for_xmitr(up); serial_out(up, UART_TX, ch); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * The console_lock must be held when we get here. */ static void serial_pxa_console_write(struct console *co, const char *s, unsigned int count) { struct uart_pxa_port *up = serial_pxa_ports[co->index]; unsigned int ier; unsigned long flags; int locked = 1; clk_enable(up->clk); local_irq_save(flags); if (up->port.sysrq) locked = 0; else if (oops_in_progress) locked = spin_trylock(&up->port.lock); else spin_lock(&up->port.lock); /* * First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); serial_out(up, UART_IER, UART_IER_UUE); uart_console_write(&up->port, s, count, serial_pxa_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); serial_out(up, UART_IER, ier); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); clk_disable(up->clk); } #ifdef CONFIG_CONSOLE_POLL /* * Console polling routines for writing and reading from the uart while * in an interrupt or debug context. 
*/ static int serial_pxa_get_poll_char(struct uart_port *port) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; unsigned char lsr = serial_in(up, UART_LSR); while (!(lsr & UART_LSR_DR)) lsr = serial_in(up, UART_LSR); return serial_in(up, UART_RX); } static void serial_pxa_put_poll_char(struct uart_port *port, unsigned char c) { unsigned int ier; struct uart_pxa_port *up = (struct uart_pxa_port *)port; /* * First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); serial_out(up, UART_IER, UART_IER_UUE); wait_for_xmitr(up); /* * Send the character out. */ serial_out(up, UART_TX, c); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); serial_out(up, UART_IER, ier); } #endif /* CONFIG_CONSOLE_POLL */ static int __init serial_pxa_console_setup(struct console *co, char *options) { struct uart_pxa_port *up; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index == -1 || co->index >= serial_pxa_reg.nr) co->index = 0; up = serial_pxa_ports[co->index]; if (!up) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&up->port, co, baud, parity, bits, flow); } static struct console serial_pxa_console = { .name = "ttyS", .write = serial_pxa_console_write, .device = uart_console_device, .setup = serial_pxa_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &serial_pxa_reg, }; #define PXA_CONSOLE &serial_pxa_console #else #define PXA_CONSOLE NULL #endif static struct uart_ops serial_pxa_pops = { .tx_empty = serial_pxa_tx_empty, .set_mctrl = serial_pxa_set_mctrl, .get_mctrl = serial_pxa_get_mctrl, .stop_tx = serial_pxa_stop_tx, .start_tx = serial_pxa_start_tx, .stop_rx = serial_pxa_stop_rx, .enable_ms = serial_pxa_enable_ms, .break_ctl = serial_pxa_break_ctl, .startup = serial_pxa_startup, .shutdown = serial_pxa_shutdown, .set_termios = serial_pxa_set_termios, .pm = serial_pxa_pm, .type = serial_pxa_type, 
.release_port = serial_pxa_release_port, .request_port = serial_pxa_request_port, .config_port = serial_pxa_config_port, .verify_port = serial_pxa_verify_port, #if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_PXA_CONSOLE) .poll_get_char = serial_pxa_get_poll_char, .poll_put_char = serial_pxa_put_poll_char, #endif }; static struct uart_driver serial_pxa_reg = { .owner = THIS_MODULE, .driver_name = "PXA serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = 4, .cons = PXA_CONSOLE, }; #ifdef CONFIG_PM static int serial_pxa_suspend(struct device *dev) { struct uart_pxa_port *sport = dev_get_drvdata(dev); if (sport) uart_suspend_port(&serial_pxa_reg, &sport->port); return 0; } static int serial_pxa_resume(struct device *dev) { struct uart_pxa_port *sport = dev_get_drvdata(dev); if (sport) uart_resume_port(&serial_pxa_reg, &sport->port); return 0; } static const struct dev_pm_ops serial_pxa_pm_ops = { .suspend = serial_pxa_suspend, .resume = serial_pxa_resume, }; #endif static const struct of_device_id serial_pxa_dt_ids[] = { { .compatible = "mrvl,pxa-uart", }, { .compatible = "mrvl,mmp-uart", }, {} }; MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids); static int serial_pxa_probe_dt(struct platform_device *pdev, struct uart_pxa_port *sport) { struct device_node *np = pdev->dev.of_node; int ret; if (!np) return 1; ret = of_alias_get_id(np, "serial"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); return ret; } sport->port.line = ret; return 0; } static int serial_pxa_probe(struct platform_device *dev) { struct uart_pxa_port *sport; struct resource *mmres, *irqres; int ret; mmres = platform_get_resource(dev, IORESOURCE_MEM, 0); irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0); if (!mmres || !irqres) return -ENODEV; sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL); if (!sport) return -ENOMEM; sport->clk = clk_get(&dev->dev, NULL); if (IS_ERR(sport->clk)) { ret = PTR_ERR(sport->clk); goto err_free; } ret = 
clk_prepare(sport->clk); if (ret) { clk_put(sport->clk); goto err_free; } sport->port.type = PORT_PXA; sport->port.iotype = UPIO_MEM; sport->port.mapbase = mmres->start; sport->port.irq = irqres->start; sport->port.fifosize = 64; sport->port.ops = &serial_pxa_pops; sport->port.dev = &dev->dev; sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; sport->port.uartclk = clk_get_rate(sport->clk); ret = serial_pxa_probe_dt(dev, sport); if (ret > 0) sport->port.line = dev->id; else if (ret < 0) goto err_clk; snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1); sport->port.membase = ioremap(mmres->start, resource_size(mmres)); if (!sport->port.membase) { ret = -ENOMEM; goto err_clk; } serial_pxa_ports[sport->port.line] = sport; uart_add_one_port(&serial_pxa_reg, &sport->port); platform_set_drvdata(dev, sport); return 0; err_clk: clk_unprepare(sport->clk); clk_put(sport->clk); err_free: kfree(sport); return ret; } static int serial_pxa_remove(struct platform_device *dev) { struct uart_pxa_port *sport = platform_get_drvdata(dev); uart_remove_one_port(&serial_pxa_reg, &sport->port); clk_unprepare(sport->clk); clk_put(sport->clk); kfree(sport); return 0; } static struct platform_driver serial_pxa_driver = { .probe = serial_pxa_probe, .remove = serial_pxa_remove, .driver = { .name = "pxa2xx-uart", #ifdef CONFIG_PM .pm = &serial_pxa_pm_ops, #endif .of_match_table = serial_pxa_dt_ids, }, }; static int __init serial_pxa_init(void) { int ret; ret = uart_register_driver(&serial_pxa_reg); if (ret != 0) return ret; ret = platform_driver_register(&serial_pxa_driver); if (ret != 0) uart_unregister_driver(&serial_pxa_reg); return ret; } static void __exit serial_pxa_exit(void) { platform_driver_unregister(&serial_pxa_driver); uart_unregister_driver(&serial_pxa_reg); } module_init(serial_pxa_init); module_exit(serial_pxa_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-uart");
gpl-2.0
ch33kybutt/CCCP_kernel
net/ipv4/inet_connection_sock.c
759
21124
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Support for INET connection oriented protocols. * * Authors: See the TCP sources * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or(at your option) any later version. */ #include <linux/module.h> #include <linux/jhash.h> #include <net/inet_connection_sock.h> #include <net/inet_hashtables.h> #include <net/inet_timewait_sock.h> #include <net/ip.h> #include <net/route.h> #include <net/tcp_states.h> #include <net/xfrm.h> #ifdef INET_CSK_DEBUG const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; EXPORT_SYMBOL(inet_csk_timer_bug_msg); #endif /* * This struct holds the first and last local port number. */ struct local_ports sysctl_local_ports __read_mostly = { .lock = SEQLOCK_UNLOCKED, .range = { 32768, 61000 }, }; unsigned long *sysctl_local_reserved_ports; EXPORT_SYMBOL(sysctl_local_reserved_ports); void inet_get_local_port_range(int *low, int *high) { unsigned seq; do { seq = read_seqbegin(&sysctl_local_ports.lock); *low = sysctl_local_ports.range[0]; *high = sysctl_local_ports.range[1]; } while (read_seqretry(&sysctl_local_ports.lock, seq)); } EXPORT_SYMBOL(inet_get_local_port_range); int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb) { const __be32 sk_rcv_saddr = inet_rcv_saddr(sk); struct sock *sk2; struct hlist_node *node; int reuse = sk->sk_reuse; /* * Unlike other sk lookup places we do not check * for sk_net here, since _all_ the socks listed * in tb->owners list belong to the same net - the * one this bucket belongs to. 
*/ sk_for_each_bound(sk2, node, &tb->owners) { if (sk != sk2 && !inet_v6_ipv6only(sk2) && (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if (!reuse || !sk2->sk_reuse || sk2->sk_state == TCP_LISTEN) { const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); if (!sk2_rcv_saddr || !sk_rcv_saddr || sk2_rcv_saddr == sk_rcv_saddr) break; } } } return node != NULL; } EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. */ int inet_csk_get_port(struct sock *sk, unsigned short snum) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_bind_hashbucket *head; struct hlist_node *node; struct inet_bind_bucket *tb; int ret, attempts = 5; struct net *net = sock_net(sk); int smallest_size = -1, smallest_rover; local_bh_disable(); if (!snum) { int remaining, rover, low, high; again: inet_get_local_port_range(&low, &high); remaining = (high - low) + 1; smallest_rover = rover = net_random() % remaining + low; smallest_size = -1; do { if (inet_is_reserved_local_port(rover)) goto next_nolock; head = &hashinfo->bhash[inet_bhashfn(net, rover, hashinfo->bhash_size)]; spin_lock(&head->lock); inet_bind_bucket_for_each(tb, node, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == rover) { if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN && (tb->num_owners < smallest_size || smallest_size == -1)) { smallest_size = tb->num_owners; smallest_rover = rover; if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { spin_unlock(&head->lock); snum = smallest_rover; goto have_snum; } } goto next; } break; next: spin_unlock(&head->lock); next_nolock: if (++rover > high) rover = low; } while (--remaining > 0); /* Exhausted local port range during search? 
It is not * possible for us to be holding one of the bind hash * locks if this test triggers, because if 'remaining' * drops to zero, we broke out of the do/while loop at * the top level, not from the 'break;' statement. */ ret = 1; if (remaining <= 0) { if (smallest_size != -1) { snum = smallest_rover; goto have_snum; } goto fail; } /* OK, here is the one we will use. HEAD is * non-NULL and we hold it's mutex. */ snum = rover; } else { have_snum: head = &hashinfo->bhash[inet_bhashfn(net, snum, hashinfo->bhash_size)]; spin_lock(&head->lock); inet_bind_bucket_for_each(tb, node, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == snum) goto tb_found; } tb = NULL; goto tb_not_found; tb_found: if (!hlist_empty(&tb->owners)) { if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size == -1) { goto success; } else { ret = 1; if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size != -1 && --attempts >= 0) { spin_unlock(&head->lock); goto again; } goto fail_unlock; } } } tb_not_found: ret = 1; if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, net, head, snum)) == NULL) goto fail_unlock; if (hlist_empty(&tb->owners)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) tb->fastreuse = 1; else tb->fastreuse = 0; } else if (tb->fastreuse && (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) tb->fastreuse = 0; success: if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, snum); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } EXPORT_SYMBOL_GPL(inet_csk_get_port); /* * Wait for an incoming connection, avoid race conditions. This must be called * with the socket locked. 
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		/* Drop the socket lock while we sleep so the softirq
		 * path can enqueue new connections. */
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* All three timers get the socket pointer as their argument. */
	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

/* Stop all three xmit timers and clear the pending flags. */
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

/*
 * Build a route for the SYN-ACK of the given request.  Honours an IP
 * source-route option (first hop from opt->faddr) and returns NULL,
 * bumping OUTNOROUTES, when no usable route exists.
 */
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .mark = sk->sk_mark,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .flags = inet_sk_flowi_flags(sk),
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->inet_sport,
					 .dport = ireq->rmt_port } } };
	struct net *net = sock_net(sk);

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(net, &rt, &fl, sk, 0))
		goto no_route;
	/* Strict source routing requires the route's gateway to be the
	 * next listed hop. */
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto route_err;
	return &rt->u.dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

/* Hash a (remote addr, remote port) pair into the SYN table;
 * synq_hsize must be a power of two. */
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

/*
 * Look up a pending connection request in the listener's SYN table.
 * On a hit, *prevp is set to the link that points at the request so
 * the caller can unlink it without re-walking the chain.
 */
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			/* A request in the SYN table must not have a child
			 * socket attached yet. */
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

/* Insert a new request into the SYN table and bump the queue counters. */
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	/* With TCP_DEFER_ACCEPT an ACKed request is kept alive until
	 * data arrives or max_retries is reached. */
	*expire = req->retrans >= thresh &&
		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->retrans >= rskq_defer_accept - 1;
}

/*
 * Walk a slice of the SYN table (clock-hand style), retransmitting
 * SYN-ACKs for live requests and dropping expired ones.  Re-arms the
 * keepalive timer while any requests remain.
 */
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen>>(lopt->max_qlen_log-1)) {
		int young = (lopt->qlen_young<<1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	/* Visit the whole table once per 'timeout' worth of intervals. */
	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp=&lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				if (req->rsk_ops->syn_ack_timeout)
					req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					/* First retransmission graduates the
					 * request from 'young'. */
					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

/*
 * Clone a listening socket into the child for an accepted request,
 * copying the addressing info from the request and resetting the
 * per-connection retransmit state.
 */
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/*
 * Move the socket into the LISTEN state: allocate the accept queue,
 * validate the local port, and hash the socket.  Returns 0 on success
 * or -EADDRINUSE if get_port() rejects the port.
 */
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

/* Fill in a sockaddr_in with the peer's address and port. */
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	/* Prefer the AF-specific compat handler; fall back to the
	 * native one when none is provided. */
	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
gpl-2.0
mingit/mstcp_v0.89.4
arch/x86/platform/intel-mid/mrfl.c
1783
2412
/*
 * mrfl.c: Intel Merrifield platform specific setup code
 *
 * (C) Copyright 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/init.h>

#include <asm/apic.h>
#include <asm/intel-mid.h>

#include "intel_mid_weak_decls.h"

/*
 * FSB frequency indexed by the 3-bit SKU field of MSR_FSB_FREQ.
 *
 * The original open-coded if/else chain had a dead error branch: the
 * index is masked with 0x7 so values 0..7 are all covered, and in the
 * "impossible" else branch BUG() was called *before* pr_err(), making
 * the message and the fallback assignment unreachable.  A table lookup
 * covers every reachable value with no dead code.
 */
static const unsigned int tangier_fsb_freq[] __initconst = {
	FSB_FREQ_100SKU,	/* 0 */
	FSB_FREQ_100SKU,	/* 1 */
	FSB_FREQ_133SKU,	/* 2 */
	FSB_FREQ_167SKU,	/* 3 */
	FSB_FREQ_83SKU,		/* 4 */
	FSB_FREQ_400SKU,	/* 5 */
	FSB_FREQ_267SKU,	/* 6 */
	FSB_FREQ_333SKU,	/* 7 */
};

/*
 * Calibrate the TSC frequency (in kHz) as bus-ratio * FSB frequency,
 * and derive the local APIC timer frequency from the FSB.  Also marks
 * the TSC clocksource reliable on this SoC.
 */
static unsigned long __init tangier_calibrate_tsc(void)
{
	unsigned long fast_calibrate;
	u32 lo, hi, ratio, fsb, bus_freq;

	/* Compute TSC as ratio * FSB */

	/* Bus ratio lives in IA32_PLATFORM_INFO bits [15:8] */
	rdmsr(MSR_PLATFORM_INFO, lo, hi);
	pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
	ratio = (lo >> 8) & 0xFF;
	pr_debug("ratio is %d\n", ratio);
	if (!ratio) {
		pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
		ratio = 4;
	}

	/* Resolve the FSB frequency from the 3-bit SKU field */
	rdmsr(MSR_FSB_FREQ, lo, hi);
	pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n", hi, lo);
	bus_freq = lo & 0x7;
	pr_debug("bus_freq = 0x%x\n", bus_freq);
	fsb = tangier_fsb_freq[bus_freq];

	/* TSC = FSB Freq * Resolved HFM Ratio */
	fast_calibrate = ratio * fsb;
	pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);

	/* Calculate the local APIC timer frequency (ticks per jiffy) */
	lapic_timer_frequency = (fsb * 1000) / HZ;
	pr_debug("Setting lapic_timer_frequency = %d\n",
		 lapic_timer_frequency);

	/* mark tsc clocksource as reliable */
	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);

	return fast_calibrate;
}

/* Install the Tangier TSC calibration hook. */
static void __init tangier_arch_setup(void)
{
	x86_platform.calibrate_tsc = tangier_calibrate_tsc;
}

/* tangier arch ops */
static struct intel_mid_ops tangier_ops = {
	.arch_setup = tangier_arch_setup,
};

/*
 * Entry point used by the intel-mid core (via intel_mid_weak_decls.h)
 * to obtain this SoC's platform ops table.
 */
void *get_tangier_ops(void)
{
	return &tangier_ops;
}
gpl-2.0
RichardWithnell/net-next-sim
drivers/ide/gayle.c
1783
4512
/*
 *  Amiga Gayle IDE Driver
 *
 *     Created 9 Jul 1997 by Geert Uytterhoeven
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/amigayle.h>

    /*
     *  Offsets from one of the above bases
     */

#define GAYLE_CONTROL	0x101a

    /*
     *  These are at different offsets from the base
     */

#define GAYLE_IRQ_4000	0xdd3020	/* MSB = 1, Harddisk is source of */
#define GAYLE_IRQ_1200	0xda9000	/* interrupt */

    /*
     *  Offset of the secondary port for IDE doublers
     *  Note that GAYLE_CONTROL is NOT available then!
     */

#define GAYLE_NEXT_PORT	0x1000

#define GAYLE_NUM_HWIFS		2
/* With a doubler both ports are probed; otherwise only the first. */
#define GAYLE_NUM_PROBE_HWIFS	(ide_doubler ? GAYLE_NUM_HWIFS : \
					       GAYLE_NUM_HWIFS-1)
/* The control register is only reachable without a doubler. */
#define GAYLE_HAS_CONTROL_REG	(!ide_doubler)

/* Module parameter "doubler": enable probing of the second port. */
static bool ide_doubler;
module_param_named(doubler, ide_doubler, bool, 0);
MODULE_PARM_DESC(doubler, "enable support for IDE doublers");

    /*
     *  Check and acknowledge the interrupt status
     */

static int gayle_test_irq(ide_hwif_t *hwif)
{
	unsigned char ch;

	ch = z_readb(hwif->io_ports.irq_addr);
	if (!(ch & GAYLE_IRQ_IDE))
		return 0;
	return 1;
}

/* A1200-style explicit interrupt acknowledge: read the status register,
 * then write the ack pattern to the IRQ register.  NOTE(review): the
 * 0x7c magic presumably preserves/clears specific Gayle IRQ bits --
 * confirm against the Gayle hardware documentation. */
static void gayle_a1200_clear_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	(void)z_readb(hwif->io_ports.status_addr);
	z_writeb(0x7c, hwif->io_ports.irq_addr);
}

/* Fill in the register layout for one port: data at 'base', the task
 * file registers at base + 2 + i*4, plus control and IRQ addresses. */
static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
				     unsigned long ctl, unsigned long irq_port)
{
	int i;

	memset(hw, 0, sizeof(*hw));

	hw->io_ports.data_addr = base;

	for (i = 1; i < 8; i++)
		hw->io_ports_array[i] = base + 2 + i * 4;

	hw->io_ports.ctl_addr = ctl;
	hw->io_ports.irq_addr = irq_port;

	hw->irq = IRQ_AMIGA_PORTS;
}

static const struct ide_port_ops gayle_a4000_port_ops = {
	.test_irq		= gayle_test_irq,
};

static const struct ide_port_ops gayle_a1200_port_ops = {
	.clear_irq		= gayle_a1200_clear_irq,
	.test_irq		= gayle_test_irq,
};

static const struct ide_port_info gayle_port_info = {
	.host_flags		= IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
				  IDE_HFLAG_NO_DMA,
	.irq_flags		= IRQF_SHARED,
	.chipset		= ide_generic,
};

    /*
     *  Probe for a Gayle IDE interface (and optionally for an IDE doubler)
     */

static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct gayle_ide_platform_data *pdata;
	unsigned long base, ctrlport, irqport;
	unsigned int i;
	int error;
	struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
	struct ide_port_info d = gayle_port_info;
	struct ide_host *host;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), "IDE"))
		return -EBUSY;

	/* NOTE(review): pdata is used unchecked -- presumably the
	 * registering platform code always supplies it; verify. */
	pdata = dev_get_platdata(&pdev->dev);
	pr_info("ide: Gayle IDE controller (A%u style%s)\n",
		pdata->explicit_ack ? 1200 : 4000,
		ide_doubler ? ", IDE doubler" : "");

	base = (unsigned long)ZTWO_VADDR(pdata->base);
	ctrlport = 0;
	irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
	/* A1200 needs the explicit IRQ acknowledge variant. */
	if (pdata->explicit_ack)
		d.port_ops = &gayle_a1200_port_ops;
	else
		d.port_ops = &gayle_a4000_port_ops;

	for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
		if (GAYLE_HAS_CONTROL_REG)
			ctrlport = base + GAYLE_CONTROL;

		gayle_setup_ports(&hw[i], base, ctrlport, irqport);
		hws[i] = &hw[i];
	}

	error = ide_host_add(&d, hws, i, &host);
	if (error)
		goto out;

	platform_set_drvdata(pdev, host);
	return 0;

out:
	/* Undo the request_mem_region() from above on failure. */
	release_mem_region(res->start, resource_size(res));
	return error;
}

static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
{
	struct ide_host *host = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ide_host_remove(host);
	release_mem_region(res->start, resource_size(res));
	return 0;
}

static struct platform_driver amiga_gayle_ide_driver = {
	.remove = __exit_p(amiga_gayle_ide_remove),
	.driver   = {
		.name	= "amiga-gayle-ide",
	},
};

/* Probe is __init and not in the ops table: the one-shot probe helper
 * is required so the driver can still bind at boot. */
module_platform_driver_probe(amiga_gayle_ide_driver, amiga_gayle_ide_probe);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:amiga-gayle-ide");
gpl-2.0
int0x19/android_kernel_xiaomi_msm8992
arch/x86/kernel/cpu/intel_cacheinfo.c
2039
33618
/* * Routines to indentify caches on Intel CPU. * * Changes: * Venkatesh Pallipadi : Adding cache identification through cpuid(4) * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/pci.h> #include <asm/processor.h> #include <linux/smp.h> #include <asm/amd_nb.h> #include <asm/smp.h> #define LVL_1_INST 1 #define LVL_1_DATA 2 #define LVL_2 3 #define LVL_3 4 #define LVL_TRACE 5 struct _cache_table { unsigned char descriptor; char cache_type; short size; }; #define MB(x) ((x) * 1024) /* All the cache descriptor types we care about (no TLB or trace cache entries) */ static const struct _cache_table __cpuinitconst cache_table[] = { { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored 
cache, 64 byte line size */ { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */ { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7a, LVL_2, 
256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ { 0x00, 0, 0} }; enum _cache_type { CACHE_TYPE_NULL = 0, CACHE_TYPE_DATA = 1, CACHE_TYPE_INST = 2, CACHE_TYPE_UNIFIED = 3 }; union _cpuid4_leaf_eax { struct { enum _cache_type type:5; unsigned int level:3; unsigned 
int is_self_initializing:1; unsigned int is_fully_associative:1; unsigned int reserved:4; unsigned int num_threads_sharing:12; unsigned int num_cores_on_die:6; } split; u32 full; }; union _cpuid4_leaf_ebx { struct { unsigned int coherency_line_size:12; unsigned int physical_line_partition:10; unsigned int ways_of_associativity:10; } split; u32 full; }; union _cpuid4_leaf_ecx { struct { unsigned int number_of_sets:32; } split; u32 full; }; struct _cpuid4_info_regs { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned long size; struct amd_northbridge *nb; }; struct _cpuid4_info { struct _cpuid4_info_regs base; DECLARE_BITMAP(shared_cpu_map, NR_CPUS); }; unsigned short num_cache_leaves; /* AMD doesn't have CPUID4. Emulate it here to report the same information to the user. This makes some assumptions about the machine: L2 not shared, no SMT etc. that is currently true on AMD CPUs. In theory the TLBs could be reported as fake type (they are in "dummy"). 
Maybe later */ union l1_cache { struct { unsigned line_size:8; unsigned lines_per_tag:8; unsigned assoc:8; unsigned size_in_kb:8; }; unsigned val; }; union l2_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned size_in_kb:16; }; unsigned val; }; union l3_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned res:2; unsigned size_encoded:14; }; unsigned val; }; static const unsigned short __cpuinitconst assocs[] = { [1] = 1, [2] = 2, [4] = 4, [6] = 8, [8] = 16, [0xa] = 32, [0xb] = 48, [0xc] = 64, [0xd] = 96, [0xe] = 128, [0xf] = 0xffff /* fully associative - no way to show this currently */ }; static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx) { unsigned dummy; unsigned line_size, lines_per_tag, assoc, size_in_kb; union l1_cache l1i, l1d; union l2_cache l2; union l3_cache l3; union l1_cache *l1 = &l1d; eax->full = 0; ebx->full = 0; ecx->full = 0; cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val); switch (leaf) { case 1: l1 = &l1i; case 0: if (!l1->val) return; assoc = assocs[l1->assoc]; line_size = l1->line_size; lines_per_tag = l1->lines_per_tag; size_in_kb = l1->size_in_kb; break; case 2: if (!l2.val) return; assoc = assocs[l2.assoc]; line_size = l2.line_size; lines_per_tag = l2.lines_per_tag; /* cpu_data has errata corrections for K7 applied */ size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); break; case 3: if (!l3.val) return; assoc = assocs[l3.assoc]; line_size = l3.line_size; lines_per_tag = l3.lines_per_tag; size_in_kb = l3.size_encoded * 512; if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { size_in_kb = size_in_kb >> 1; assoc = assoc >> 1; } break; default: return; } eax->split.is_self_initializing = 1; 
eax->split.type = types[leaf]; eax->split.level = levels[leaf]; eax->split.num_threads_sharing = 0; eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; if (assoc == 0xffff) eax->split.is_fully_associative = 1; ebx->split.coherency_line_size = line_size - 1; ebx->split.ways_of_associativity = assoc - 1; ebx->split.physical_line_partition = lines_per_tag - 1; ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / (ebx->split.ways_of_associativity + 1) - 1; } struct _cache_attr { struct attribute attr; ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, unsigned int); }; #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) /* * L3 cache descriptors */ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) { struct amd_l3_cache *l3 = &nb->l3_cache; unsigned int sc0, sc1, sc2, sc3; u32 val = 0; pci_read_config_dword(nb->misc, 0x1C4, &val); /* calculate subcache sizes */ l3->subcaches[0] = sc0 = !(val & BIT(0)); l3->subcaches[1] = sc1 = !(val & BIT(4)); if (boot_cpu_data.x86 == 0x15) { l3->subcaches[0] = sc0 += !(val & BIT(1)); l3->subcaches[1] = sc1 += !(val & BIT(5)); } l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; } static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) { int node; /* only for L3, and not in virtualized environments */ if (index < 3) return; node = amd_get_nb_id(smp_processor_id()); this_leaf->nb = node_to_amd_nb(node); if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) amd_calc_l3_indices(this_leaf->nb); } /* * check whether a slot used for disabling an L3 index is occupied. * @l3: L3 cache descriptor * @slot: slot number (0..1) * * @returns: the disabled index if used or negative value if slot free. 
*/ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot) { unsigned int reg = 0; pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg); /* check whether this slot is activated already */ if (reg & (3UL << 30)) return reg & 0xfff; return -1; } static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, unsigned int slot) { int index; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return -EINVAL; index = amd_get_l3_disable_slot(this_leaf->base.nb, slot); if (index >= 0) return sprintf(buf, "%d\n", index); return sprintf(buf, "FREE\n"); } #define SHOW_CACHE_DISABLE(slot) \ static ssize_t \ show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ unsigned int cpu) \ { \ return show_cache_disable(this_leaf, buf, slot); \ } SHOW_CACHE_DISABLE(0) SHOW_CACHE_DISABLE(1) static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long idx) { int i; idx |= BIT(30); /* * disable index in all 4 subcaches */ for (i = 0; i < 4; i++) { u32 reg = idx | (i << 20); if (!nb->l3_cache.subcaches[i]) continue; pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); /* * We need to WBINVD on a core on the node containing the L3 * cache which indices we disable therefore a simple wbinvd() * is not sufficient. 
*/ wbinvd_on_cpu(cpu); reg |= BIT(31); pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); } } /* * disable a L3 cache index by using a disable-slot * * @l3: L3 cache descriptor * @cpu: A CPU on the node containing the L3 cache * @slot: slot number (0..1) * @index: index to disable * * @return: 0 on success, error status on failure */ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long index) { int ret = 0; /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(nb, slot); if (ret >= 0) return -EEXIST; if (index > nb->l3_cache.indices) return -EINVAL; /* check whether the other slot has disabled the same index already */ if (index == amd_get_l3_disable_slot(nb, !slot)) return -EEXIST; amd_l3_disable_index(nb, cpu, slot, index); return 0; } static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, size_t count, unsigned int slot) { unsigned long val = 0; int cpu, err = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return -EINVAL; cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); if (strict_strtoul(buf, 10, &val) < 0) return -EINVAL; err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); if (err) { if (err == -EEXIST) pr_warning("L3 slot %d in use/index already disabled!\n", slot); return err; } return count; } #define STORE_CACHE_DISABLE(slot) \ static ssize_t \ store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ const char *buf, size_t count, \ unsigned int cpu) \ { \ return store_cache_disable(this_leaf, buf, count, slot); \ } STORE_CACHE_DISABLE(0) STORE_CACHE_DISABLE(1) static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, show_cache_disable_0, store_cache_disable_0); static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1); static ssize_t show_subcaches(struct 
_cpuid4_info *this_leaf, char *buf, unsigned int cpu) { if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return -EINVAL; return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); } static ssize_t store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, unsigned int cpu) { unsigned long val; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return -EINVAL; if (strict_strtoul(buf, 16, &val) < 0) return -EINVAL; if (amd_set_subcaches(cpu, val)) return -EINVAL; return count; } static struct _cache_attr subcaches = __ATTR(subcaches, 0644, show_subcaches, store_subcaches); #else #define amd_init_l3_cache(x, y) #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ static int __cpuinit cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned edx; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { if (cpu_has_topoext) cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &edx); else amd_cpuid4(index, &eax, &ebx, &ecx); amd_init_l3_cache(this_leaf, index); } else { cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); } if (eax.split.type == CACHE_TYPE_NULL) return -EIO; /* better error ? 
*/ this_leaf->eax = eax; this_leaf->ebx = ebx; this_leaf->ecx = ecx; this_leaf->size = (ecx.split.number_of_sets + 1) * (ebx.split.coherency_line_size + 1) * (ebx.split.physical_line_partition + 1) * (ebx.split.ways_of_associativity + 1); return 0; } static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx, op; union _cpuid4_leaf_eax cache_eax; int i = -1; if (c->x86_vendor == X86_VENDOR_AMD) op = 0x8000001d; else op = 4; do { ++i; /* Do cpuid(op) loop to find out num_cache_leaves */ cpuid_count(op, i, &eax, &ebx, &ecx, &edx); cache_eax.full = eax; } while (cache_eax.split.type != CACHE_TYPE_NULL); return i; } void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) { if (cpu_has_topoext) { num_cache_leaves = find_num_cache_leaves(c); } else if (c->extended_cpuid_level >= 0x80000006) { if (cpuid_edx(0x80000006) & 0xf000) num_cache_leaves = 4; else num_cache_leaves = 3; } } unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) { /* Cache sizes */ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; #ifdef CONFIG_X86_HT unsigned int cpu = c->cpu_index; #endif if (c->cpuid_level > 3) { static int is_initialized; if (is_initialized == 0) { /* Init num_cache_leaves from boot CPU */ num_cache_leaves = find_num_cache_leaves(c); is_initialized++; } /* * Whenever possible use cpuid(4), deterministic cache * parameters cpuid leaf to find the cache details */ for (i = 0; i < num_cache_leaves; i++) { struct _cpuid4_info_regs this_leaf; int retval; retval = cpuid4_cache_lookup_regs(i, &this_leaf); if (retval >= 0) { switch (this_leaf.eax.split.level) { case 1: if (this_leaf.eax.split.type == CACHE_TYPE_DATA) new_l1d = this_leaf.size/1024; else if (this_leaf.eax.split.type == CACHE_TYPE_INST) new_l1i = 
this_leaf.size/1024; break; case 2: new_l2 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); l2_id = c->apicid & ~((1 << index_msb) - 1); break; case 3: new_l3 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order( num_threads_sharing); l3_id = c->apicid & ~((1 << index_msb) - 1); break; default: break; } } } } /* * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for * trace cache */ if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ int j, n; unsigned int regs[4]; unsigned char *dp = (unsigned char *)regs; int only_trace = 0; if (num_cache_leaves != 0 && c->x86 == 15) only_trace = 1; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; for (i = 0 ; i < n ; i++) { cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ for (j = 0 ; j < 3 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; /* Byte 0 is level count, not a descriptor */ for (j = 1 ; j < 16 ; j++) { unsigned char des = dp[j]; unsigned char k = 0; /* look up this descriptor in the table */ while (cache_table[k].descriptor != 0) { if (cache_table[k].descriptor == des) { if (only_trace && cache_table[k].cache_type != LVL_TRACE) break; switch (cache_table[k].cache_type) { case LVL_1_INST: l1i += cache_table[k].size; break; case LVL_1_DATA: l1d += cache_table[k].size; break; case LVL_2: l2 += cache_table[k].size; break; case LVL_3: l3 += cache_table[k].size; break; case LVL_TRACE: trace += cache_table[k].size; break; } break; } k++; } } } } if (new_l1d) l1d = new_l1d; if (new_l1i) l1i = new_l1i; if (new_l2) { l2 = new_l2; #ifdef CONFIG_X86_HT per_cpu(cpu_llc_id, cpu) = l2_id; #endif } if (new_l3) { l3 = new_l3; #ifdef CONFIG_X86_HT per_cpu(cpu_llc_id, cpu) = l3_id; #endif } c->x86_cache_size = l3 ? l3 : (l2 ? 
l2 : (l1i+l1d)); return l2; } #ifdef CONFIG_SYSFS /* pointer to _cpuid4_info array (for each cache leaf) */ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) #ifdef CONFIG_SMP static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf; int i, sibling; if (cpu_has_topoext) { unsigned int apicid, nshared, first, last; if (!per_cpu(ici_cpuid4_info, cpu)) return 0; this_leaf = CPUID4_INFO_IDX(cpu, index); nshared = this_leaf->base.eax.split.num_threads_sharing + 1; apicid = cpu_data(cpu).apicid; first = apicid - (apicid % nshared); last = first + nshared - 1; for_each_online_cpu(i) { apicid = cpu_data(i).apicid; if ((apicid < first) || (apicid > last)) continue; if (!per_cpu(ici_cpuid4_info, i)) continue; this_leaf = CPUID4_INFO_IDX(i, index); for_each_online_cpu(sibling) { apicid = cpu_data(sibling).apicid; if ((apicid < first) || (apicid > last)) continue; set_bit(sibling, this_leaf->shared_cpu_map); } } } else if (index == 3) { for_each_cpu(i, cpu_llc_shared_mask(cpu)) { if (!per_cpu(ici_cpuid4_info, i)) continue; this_leaf = CPUID4_INFO_IDX(i, index); for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { if (!cpu_online(sibling)) continue; set_bit(sibling, this_leaf->shared_cpu_map); } } } else return 0; return 1; } static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; unsigned long num_threads_sharing; int index_msb, i; struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->x86_vendor == X86_VENDOR_AMD) { if (cache_shared_amd_cpu_map_setup(cpu, index)) return; } this_leaf = CPUID4_INFO_IDX(cpu, index); num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; if (num_threads_sharing == 1) cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); else { index_msb = get_count_order(num_threads_sharing); for_each_online_cpu(i) { if (cpu_data(i).apicid >> 
index_msb == c->apicid >> index_msb) { cpumask_set_cpu(i, to_cpumask(this_leaf->shared_cpu_map)); if (i != cpu && per_cpu(ici_cpuid4_info, i)) { sibling_leaf = CPUID4_INFO_IDX(i, index); cpumask_set_cpu(cpu, to_cpumask( sibling_leaf->shared_cpu_map)); } } } } } static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; int sibling; this_leaf = CPUID4_INFO_IDX(cpu, index); for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { sibling_leaf = CPUID4_INFO_IDX(sibling, index); cpumask_clear_cpu(cpu, to_cpumask(sibling_leaf->shared_cpu_map)); } } #else static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { } static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { } #endif static void __cpuinit free_cache_attributes(unsigned int cpu) { int i; for (i = 0; i < num_cache_leaves; i++) cache_remove_shared_cpu_map(cpu, i); kfree(per_cpu(ici_cpuid4_info, cpu)); per_cpu(ici_cpuid4_info, cpu) = NULL; } static void __cpuinit get_cpu_leaves(void *_retval) { int j, *retval = _retval, cpu = smp_processor_id(); /* Do cpuid and store the results */ for (j = 0; j < num_cache_leaves; j++) { struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j); *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base); if (unlikely(*retval < 0)) { int i; for (i = 0; i < j; i++) cache_remove_shared_cpu_map(cpu, i); break; } cache_shared_cpu_map_setup(cpu, j); } } static int __cpuinit detect_cache_attributes(unsigned int cpu) { int retval; if (num_cache_leaves == 0) return -ENOENT; per_cpu(ici_cpuid4_info, cpu) = kzalloc( sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); if (per_cpu(ici_cpuid4_info, cpu) == NULL) return -ENOMEM; smp_call_function_single(cpu, get_cpu_leaves, &retval, true); if (retval) { kfree(per_cpu(ici_cpuid4_info, cpu)); per_cpu(ici_cpuid4_info, cpu) = NULL; } return retval; } #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/cpu.h> 
/* pointer to kobject for cpuX/cache */ static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); struct _index_kobject { struct kobject kobj; unsigned int cpu; unsigned short index; }; /* pointer to array of kobjects for cpuX/cache/indexY */ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) #define show_one_plus(file_name, object, val) \ static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ unsigned int cpu) \ { \ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ } show_one_plus(level, base.eax.split.level, 0); show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1); show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1); show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1); show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1); static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) { return sprintf(buf, "%luK\n", this_leaf->base.size / 1024); } static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, int type, char *buf) { ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; int n = 0; if (len > 1) { const struct cpumask *mask; mask = to_cpumask(this_leaf->shared_cpu_map); n = type ? 
cpulist_scnprintf(buf, len-2, mask) : cpumask_scnprintf(buf, len-2, mask); buf[n++] = '\n'; buf[n] = '\0'; } return n; } static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, unsigned int cpu) { return show_shared_cpu_map_func(leaf, 0, buf); } static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, unsigned int cpu) { return show_shared_cpu_map_func(leaf, 1, buf); } static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) { switch (this_leaf->base.eax.split.type) { case CACHE_TYPE_DATA: return sprintf(buf, "Data\n"); case CACHE_TYPE_INST: return sprintf(buf, "Instruction\n"); case CACHE_TYPE_UNIFIED: return sprintf(buf, "Unified\n"); default: return sprintf(buf, "Unknown\n"); } } #define to_object(k) container_of(k, struct _index_kobject, kobj) #define to_attr(a) container_of(a, struct _cache_attr, attr) #define define_one_ro(_name) \ static struct _cache_attr _name = \ __ATTR(_name, 0444, show_##_name, NULL) define_one_ro(level); define_one_ro(type); define_one_ro(coherency_line_size); define_one_ro(physical_line_partition); define_one_ro(ways_of_associativity); define_one_ro(number_of_sets); define_one_ro(size); define_one_ro(shared_cpu_map); define_one_ro(shared_cpu_list); static struct attribute *default_attrs[] = { &type.attr, &level.attr, &coherency_line_size.attr, &physical_line_partition.attr, &ways_of_associativity.attr, &number_of_sets.attr, &size.attr, &shared_cpu_map.attr, &shared_cpu_list.attr, NULL }; #ifdef CONFIG_AMD_NB static struct attribute ** __cpuinit amd_l3_attrs(void) { static struct attribute **attrs; int n; if (attrs) return attrs; n = ARRAY_SIZE(default_attrs); if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) n += 1; attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); if (attrs == NULL) return attrs = default_attrs; for (n = 0; default_attrs[n]; n++) attrs[n] = default_attrs[n]; if 
(amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { attrs[n++] = &cache_disable_0.attr; attrs[n++] = &cache_disable_1.attr; } if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) attrs[n++] = &subcaches.attr; return attrs; } #endif static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct _cache_attr *fattr = to_attr(attr); struct _index_kobject *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->show ? fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), buf, this_leaf->cpu) : 0; return ret; } static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct _cache_attr *fattr = to_attr(attr); struct _index_kobject *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->store ? fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), buf, count, this_leaf->cpu) : 0; return ret; } static const struct sysfs_ops sysfs_ops = { .show = show, .store = store, }; static struct kobj_type ktype_cache = { .sysfs_ops = &sysfs_ops, .default_attrs = default_attrs, }; static struct kobj_type ktype_percpu_entry = { .sysfs_ops = &sysfs_ops, }; static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) { kfree(per_cpu(ici_cache_kobject, cpu)); kfree(per_cpu(ici_index_kobject, cpu)); per_cpu(ici_cache_kobject, cpu) = NULL; per_cpu(ici_index_kobject, cpu) = NULL; free_cache_attributes(cpu); } static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) { int err; if (num_cache_leaves == 0) return -ENOENT; err = detect_cache_attributes(cpu); if (err) return err; /* Allocate all required memory */ per_cpu(ici_cache_kobject, cpu) = kzalloc(sizeof(struct kobject), GFP_KERNEL); if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) goto err_out; per_cpu(ici_index_kobject, cpu) = kzalloc( sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) goto err_out; return 0; err_out: cpuid4_cache_sysfs_exit(cpu); return -ENOMEM; } static 
DECLARE_BITMAP(cache_dev_map, NR_CPUS); /* Add/Remove cache interface for CPU device */ static int __cpuinit cache_add_dev(struct device *dev) { unsigned int cpu = dev->id; unsigned long i, j; struct _index_kobject *this_object; struct _cpuid4_info *this_leaf; int retval; retval = cpuid4_cache_sysfs_init(cpu); if (unlikely(retval < 0)) return retval; retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), &ktype_percpu_entry, &dev->kobj, "%s", "cache"); if (retval < 0) { cpuid4_cache_sysfs_exit(cpu); return retval; } for (i = 0; i < num_cache_leaves; i++) { this_object = INDEX_KOBJECT_PTR(cpu, i); this_object->cpu = cpu; this_object->index = i; this_leaf = CPUID4_INFO_IDX(cpu, i); ktype_cache.default_attrs = default_attrs; #ifdef CONFIG_AMD_NB if (this_leaf->base.nb) ktype_cache.default_attrs = amd_l3_attrs(); #endif retval = kobject_init_and_add(&(this_object->kobj), &ktype_cache, per_cpu(ici_cache_kobject, cpu), "index%1lu", i); if (unlikely(retval)) { for (j = 0; j < i; j++) kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); kobject_put(per_cpu(ici_cache_kobject, cpu)); cpuid4_cache_sysfs_exit(cpu); return retval; } kobject_uevent(&(this_object->kobj), KOBJ_ADD); } cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); return 0; } static void __cpuinit cache_remove_dev(struct device *dev) { unsigned int cpu = dev->id; unsigned long i; if (per_cpu(ici_cpuid4_info, cpu) == NULL) return; if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) return; cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); for (i = 0; i < num_cache_leaves; i++) kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); kobject_put(per_cpu(ici_cache_kobject, cpu)); cpuid4_cache_sysfs_exit(cpu); } static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct device *dev; dev = get_cpu_device(cpu); switch (action) { case CPU_ONLINE: case 
CPU_ONLINE_FROZEN: cache_add_dev(dev); break; case CPU_DEAD: case CPU_DEAD_FROZEN: cache_remove_dev(dev); break; } return NOTIFY_OK; } static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { .notifier_call = cacheinfo_cpu_callback, }; static int __init cache_sysfs_init(void) { int i; if (num_cache_leaves == 0) return 0; for_each_online_cpu(i) { int err; struct device *dev = get_cpu_device(i); err = cache_add_dev(dev); if (err) return err; } register_hotcpu_notifier(&cacheinfo_cpu_notifier); return 0; } device_initcall(cache_sysfs_init); #endif
gpl-2.0
dekkyy1/onex_3.1.10-JB_kernel
drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c
2807
26295
//------------------------------------------------------------------------------ // <copyright file="ar6k_gmbox.c" company="Atheros"> // Copyright (c) 2007-2010 Atheros Corporation. All rights reserved. // // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // //------------------------------------------------------------------------------ //============================================================================== // Generic MBOX API implementation // // Author(s): ="Atheros" //============================================================================== #include "a_config.h" #include "athdefs.h" #include "a_osapi.h" #include "../htc_debug.h" #include "hif.h" #include "htc_packet.h" #include "ar6k.h" #include "hw/mbox_host_reg.h" #include "gmboxif.h" /* * This file provides management functions and a toolbox for GMBOX protocol modules. * Only one protocol module can be installed at a time. The determination of which protocol * module is installed is determined at compile time. 
* */ #ifdef ATH_AR6K_ENABLE_GMBOX /* GMBOX definitions */ #define GMBOX_INT_STATUS_ENABLE_REG 0x488 #define GMBOX_INT_STATUS_RX_DATA (1 << 0) #define GMBOX_INT_STATUS_TX_OVERFLOW (1 << 1) #define GMBOX_INT_STATUS_RX_OVERFLOW (1 << 2) #define GMBOX_LOOKAHEAD_MUX_REG 0x498 #define GMBOX_LA_MUX_OVERRIDE_2_3 (1 << 0) #define AR6K_GMBOX_CREDIT_DEC_ADDRESS (COUNT_DEC_ADDRESS + 4 * AR6K_GMBOX_CREDIT_COUNTER) #define AR6K_GMBOX_CREDIT_SIZE_ADDRESS (COUNT_ADDRESS + AR6K_GMBOX_CREDIT_SIZE_COUNTER) /* external APIs for allocating and freeing internal I/O packets to handle ASYNC I/O */ extern void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket); extern struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev); /* callback when our fetch to enable/disable completes */ static void DevGMboxIRQActionAsyncHandler(void *Context, struct htc_packet *pPacket) { struct ar6k_device *pDev = (struct ar6k_device *)Context; AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGMboxIRQActionAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev)); if (pPacket->Status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("IRQAction Operation (%d) failed! 
status:%d \n", pPacket->PktInfo.AsRx.HTCRxFlags,pPacket->Status)); } /* free this IO packet */ AR6KFreeIOPacket(pDev,pPacket); AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGMboxIRQActionAsyncHandler \n")); } static int DevGMboxCounterEnableDisable(struct ar6k_device *pDev, GMBOX_IRQ_ACTION_TYPE IrqAction, bool AsyncMode) { int status = 0; struct ar6k_irq_enable_registers regs; struct htc_packet *pIOPacket = NULL; LOCK_AR6K(pDev); if (GMBOX_CREDIT_IRQ_ENABLE == IrqAction) { pDev->GMboxInfo.CreditCountIRQEnabled = true; pDev->IrqEnableRegisters.counter_int_status_enable |= COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER); pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_COUNTER_SET(0x01); } else { pDev->GMboxInfo.CreditCountIRQEnabled = false; pDev->IrqEnableRegisters.counter_int_status_enable &= ~(COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER)); } /* copy into our temp area */ memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE); UNLOCK_AR6K(pDev); do { if (AsyncMode) { pIOPacket = AR6KAllocIOPacket(pDev); if (NULL == pIOPacket) { status = A_NO_MEMORY; A_ASSERT(false); break; } /* copy values to write to our async I/O buffer */ memcpy(pIOPacket->pBuffer,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE); /* stick in our completion routine when the I/O operation completes */ pIOPacket->Completion = DevGMboxIRQActionAsyncHandler; pIOPacket->pContext = pDev; pIOPacket->PktInfo.AsRx.HTCRxFlags = IrqAction; /* write it out asynchronously */ HIFReadWrite(pDev->HIFDevice, INT_STATUS_ENABLE_ADDRESS, pIOPacket->pBuffer, AR6K_IRQ_ENABLE_REGS_SIZE, HIF_WR_ASYNC_BYTE_INC, pIOPacket); pIOPacket = NULL; break; } /* if we get here we are doing it synchronously */ status = HIFReadWrite(pDev->HIFDevice, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_enable, AR6K_IRQ_ENABLE_REGS_SIZE, HIF_WR_SYNC_BYTE_INC, NULL); } while (false); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" IRQAction Operation (%d) failed! 
status:%d \n", IrqAction, status)); } else { if (!AsyncMode) { AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, (" IRQAction Operation (%d) success \n", IrqAction)); } } if (pIOPacket != NULL) { AR6KFreeIOPacket(pDev,pIOPacket); } return status; } int DevGMboxIRQAction(struct ar6k_device *pDev, GMBOX_IRQ_ACTION_TYPE IrqAction, bool AsyncMode) { int status = 0; struct htc_packet *pIOPacket = NULL; u8 GMboxIntControl[4]; if (GMBOX_CREDIT_IRQ_ENABLE == IrqAction) { return DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_ENABLE, AsyncMode); } else if(GMBOX_CREDIT_IRQ_DISABLE == IrqAction) { return DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_DISABLE, AsyncMode); } if (GMBOX_DISABLE_ALL == IrqAction) { /* disable credit IRQ, those are on a different set of registers */ DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_DISABLE, AsyncMode); } /* take the lock to protect interrupt enable shadows */ LOCK_AR6K(pDev); switch (IrqAction) { case GMBOX_DISABLE_ALL: pDev->GMboxControlRegisters.int_status_enable = 0; break; case GMBOX_ERRORS_IRQ_ENABLE: pDev->GMboxControlRegisters.int_status_enable |= GMBOX_INT_STATUS_TX_OVERFLOW | GMBOX_INT_STATUS_RX_OVERFLOW; break; case GMBOX_RECV_IRQ_ENABLE: pDev->GMboxControlRegisters.int_status_enable |= GMBOX_INT_STATUS_RX_DATA; break; case GMBOX_RECV_IRQ_DISABLE: pDev->GMboxControlRegisters.int_status_enable &= ~GMBOX_INT_STATUS_RX_DATA; break; case GMBOX_ACTION_NONE: default: A_ASSERT(false); break; } GMboxIntControl[0] = pDev->GMboxControlRegisters.int_status_enable; GMboxIntControl[1] = GMboxIntControl[0]; GMboxIntControl[2] = GMboxIntControl[0]; GMboxIntControl[3] = GMboxIntControl[0]; UNLOCK_AR6K(pDev); do { if (AsyncMode) { pIOPacket = AR6KAllocIOPacket(pDev); if (NULL == pIOPacket) { status = A_NO_MEMORY; A_ASSERT(false); break; } /* copy values to write to our async I/O buffer */ memcpy(pIOPacket->pBuffer,GMboxIntControl,sizeof(GMboxIntControl)); /* stick in our completion routine when the I/O operation completes */ pIOPacket->Completion 
= DevGMboxIRQActionAsyncHandler; pIOPacket->pContext = pDev; pIOPacket->PktInfo.AsRx.HTCRxFlags = IrqAction; /* write it out asynchronously */ HIFReadWrite(pDev->HIFDevice, GMBOX_INT_STATUS_ENABLE_REG, pIOPacket->pBuffer, sizeof(GMboxIntControl), HIF_WR_ASYNC_BYTE_FIX, pIOPacket); pIOPacket = NULL; break; } /* if we get here we are doing it synchronously */ status = HIFReadWrite(pDev->HIFDevice, GMBOX_INT_STATUS_ENABLE_REG, GMboxIntControl, sizeof(GMboxIntControl), HIF_WR_SYNC_BYTE_FIX, NULL); } while (false); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" IRQAction Operation (%d) failed! status:%d \n", IrqAction, status)); } else { if (!AsyncMode) { AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, (" IRQAction Operation (%d) success \n", IrqAction)); } } if (pIOPacket != NULL) { AR6KFreeIOPacket(pDev,pIOPacket); } return status; } void DevCleanupGMbox(struct ar6k_device *pDev) { if (pDev->GMboxEnabled) { pDev->GMboxEnabled = false; GMboxProtocolUninstall(pDev); } } int DevSetupGMbox(struct ar6k_device *pDev) { int status = 0; u8 muxControl[4]; do { if (0 == pDev->MailBoxInfo.GMboxAddress) { break; } AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(" GMBOX Advertised: Address:0x%X , size:%d \n", pDev->MailBoxInfo.GMboxAddress, pDev->MailBoxInfo.GMboxSize)); status = DevGMboxIRQAction(pDev, GMBOX_DISABLE_ALL, PROC_IO_SYNC); if (status) { break; } /* write to mailbox look ahead mux control register, we want the * GMBOX lookaheads to appear on lookaheads 2 and 3 * the register is 1-byte wide so we need to hit it 4 times to align the operation * to 4-bytes */ muxControl[0] = GMBOX_LA_MUX_OVERRIDE_2_3; muxControl[1] = GMBOX_LA_MUX_OVERRIDE_2_3; muxControl[2] = GMBOX_LA_MUX_OVERRIDE_2_3; muxControl[3] = GMBOX_LA_MUX_OVERRIDE_2_3; status = HIFReadWrite(pDev->HIFDevice, GMBOX_LOOKAHEAD_MUX_REG, muxControl, sizeof(muxControl), HIF_WR_SYNC_BYTE_FIX, /* hit this register 4 times */ NULL); if (status) { break; } status = GMboxProtocolInstall(pDev); if (status) { break; } pDev->GMboxEnabled = true; } while 
(false); return status; } int DevCheckGMboxInterrupts(struct ar6k_device *pDev) { int status = 0; u8 counter_int_status; int credits; u8 host_int_status2; AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("+DevCheckGMboxInterrupts \n")); /* the caller guarantees that this is a context that allows for blocking I/O */ do { host_int_status2 = pDev->IrqProcRegisters.host_int_status2 & pDev->GMboxControlRegisters.int_status_enable; if (host_int_status2 & GMBOX_INT_STATUS_TX_OVERFLOW) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("GMBOX : TX Overflow \n")); status = A_ECOMM; } if (host_int_status2 & GMBOX_INT_STATUS_RX_OVERFLOW) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("GMBOX : RX Overflow \n")); status = A_ECOMM; } if (status) { if (pDev->GMboxInfo.pTargetFailureCallback != NULL) { pDev->GMboxInfo.pTargetFailureCallback(pDev->GMboxInfo.pProtocolContext, status); } break; } if (host_int_status2 & GMBOX_INT_STATUS_RX_DATA) { if (pDev->IrqProcRegisters.gmbox_rx_avail > 0) { A_ASSERT(pDev->GMboxInfo.pMessagePendingCallBack != NULL); status = pDev->GMboxInfo.pMessagePendingCallBack( pDev->GMboxInfo.pProtocolContext, (u8 *)&pDev->IrqProcRegisters.rx_gmbox_lookahead_alias[0], pDev->IrqProcRegisters.gmbox_rx_avail); } } if (status) { break; } counter_int_status = pDev->IrqProcRegisters.counter_int_status & pDev->IrqEnableRegisters.counter_int_status_enable; /* check if credit interrupt is pending */ if (counter_int_status & (COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER))) { /* do synchronous read */ status = DevGMboxReadCreditCounter(pDev, PROC_IO_SYNC, &credits); if (status) { break; } A_ASSERT(pDev->GMboxInfo.pCreditsPendingCallback != NULL); status = pDev->GMboxInfo.pCreditsPendingCallback(pDev->GMboxInfo.pProtocolContext, credits, pDev->GMboxInfo.CreditCountIRQEnabled); } } while (false); AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("-DevCheckGMboxInterrupts (%d) \n",status)); return status; } int DevGMboxWrite(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 WriteLength) { u32 paddedLength; 
bool sync = (pPacket->Completion == NULL) ? true : false; int status; u32 address; /* adjust the length to be a multiple of block size if appropriate */ paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, WriteLength); AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("DevGMboxWrite, Padded Length: %d Mbox:0x%X (mode:%s)\n", WriteLength, pDev->MailBoxInfo.GMboxAddress, sync ? "SYNC" : "ASYNC")); /* last byte of packet has to hit the EOM marker */ address = pDev->MailBoxInfo.GMboxAddress + pDev->MailBoxInfo.GMboxSize - paddedLength; status = HIFReadWrite(pDev->HIFDevice, address, pPacket->pBuffer, paddedLength, /* the padded length */ sync ? HIF_WR_SYNC_BLOCK_INC : HIF_WR_ASYNC_BLOCK_INC, sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */ if (sync) { pPacket->Status = status; } else { if (status == A_PENDING) { status = 0; } } return status; } int DevGMboxRead(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 ReadLength) { u32 paddedLength; int status; bool sync = (pPacket->Completion == NULL) ? true : false; /* adjust the length to be a multiple of block size if appropriate */ paddedLength = DEV_CALC_RECV_PADDED_LEN(pDev, ReadLength); if (paddedLength > pPacket->BufferLength) { A_ASSERT(false); AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("DevGMboxRead, Not enough space for padlen:%d recvlen:%d bufferlen:%d \n", paddedLength,ReadLength,pPacket->BufferLength)); if (pPacket->Completion != NULL) { COMPLETE_HTC_PACKET(pPacket,A_EINVAL); return 0; } return A_EINVAL; } AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("DevGMboxRead (0x%lX : hdr:0x%X) Padded Length: %d Mbox:0x%X (mode:%s)\n", (unsigned long)pPacket, pPacket->PktInfo.AsRx.ExpectedHdr, paddedLength, pDev->MailBoxInfo.GMboxAddress, sync ? "SYNC" : "ASYNC")); status = HIFReadWrite(pDev->HIFDevice, pDev->MailBoxInfo.GMboxAddress, pPacket->pBuffer, paddedLength, sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX, sync ? 
NULL : pPacket); /* pass the packet as the context to the HIF request */ if (sync) { pPacket->Status = status; } return status; } static int ProcessCreditCounterReadBuffer(u8 *pBuffer, int Length) { int credits = 0; /* theory of how this works: * We read the credit decrement register multiple times on a byte-wide basis. * The number of times (32) aligns the I/O operation to be a multiple of 4 bytes and provides a * reasonable chance to acquire "all" pending credits in a single I/O operation. * * Once we obtain the filled buffer, we can walk through it looking for credit decrement transitions. * Each non-zero byte represents a single credit decrement (which is a credit given back to the host) * For example if the target provides 3 credits and added 4 more during the 32-byte read operation the following * pattern "could" appear: * * 0x3 0x2 0x1 0x0 0x0 0x0 0x0 0x0 0x1 0x0 0x1 0x0 0x1 0x0 0x1 0x0 ......rest zeros * <---------> <-----------------------------> * \_ credits aleady there \_ target adding 4 more credits * * The total available credits would be 7, since there are 7 non-zero bytes in the buffer. * * */ if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { DebugDumpBytes(pBuffer, Length, "GMBOX Credit read buffer"); } while (Length) { if (*pBuffer != 0) { credits++; } Length--; pBuffer++; } return credits; } /* callback when our fetch to enable/disable completes */ static void DevGMboxReadCreditsAsyncHandler(void *Context, struct htc_packet *pPacket) { struct ar6k_device *pDev = (struct ar6k_device *)Context; AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGMboxReadCreditsAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev)); if (pPacket->Status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Read Credit Operation failed! 
status:%d \n", pPacket->Status)); } else { int credits = 0; credits = ProcessCreditCounterReadBuffer(pPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE); pDev->GMboxInfo.pCreditsPendingCallback(pDev->GMboxInfo.pProtocolContext, credits, pDev->GMboxInfo.CreditCountIRQEnabled); } /* free this IO packet */ AR6KFreeIOPacket(pDev,pPacket); AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGMboxReadCreditsAsyncHandler \n")); } int DevGMboxReadCreditCounter(struct ar6k_device *pDev, bool AsyncMode, int *pCredits) { int status = 0; struct htc_packet *pIOPacket = NULL; AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+DevGMboxReadCreditCounter (%s) \n", AsyncMode ? "ASYNC" : "SYNC")); do { pIOPacket = AR6KAllocIOPacket(pDev); if (NULL == pIOPacket) { status = A_NO_MEMORY; A_ASSERT(false); break; } A_MEMZERO(pIOPacket->pBuffer,AR6K_REG_IO_BUFFER_SIZE); if (AsyncMode) { /* stick in our completion routine when the I/O operation completes */ pIOPacket->Completion = DevGMboxReadCreditsAsyncHandler; pIOPacket->pContext = pDev; /* read registers asynchronously */ HIFReadWrite(pDev->HIFDevice, AR6K_GMBOX_CREDIT_DEC_ADDRESS, pIOPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE, /* hit the register multiple times */ HIF_RD_ASYNC_BYTE_FIX, pIOPacket); pIOPacket = NULL; break; } pIOPacket->Completion = NULL; /* if we get here we are doing it synchronously */ status = HIFReadWrite(pDev->HIFDevice, AR6K_GMBOX_CREDIT_DEC_ADDRESS, pIOPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE, HIF_RD_SYNC_BYTE_FIX, NULL); } while (false); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" DevGMboxReadCreditCounter failed! status:%d \n", status)); } if (pIOPacket != NULL) { if (!status) { /* sync mode processing */ *pCredits = ProcessCreditCounterReadBuffer(pIOPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE); } AR6KFreeIOPacket(pDev,pIOPacket); } AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-DevGMboxReadCreditCounter (%s) (%d) \n", AsyncMode ? 
"ASYNC" : "SYNC", status)); return status; } int DevGMboxReadCreditSize(struct ar6k_device *pDev, int *pCreditSize) { int status; u8 buffer[4]; status = HIFReadWrite(pDev->HIFDevice, AR6K_GMBOX_CREDIT_SIZE_ADDRESS, buffer, sizeof(buffer), HIF_RD_SYNC_BYTE_FIX, /* hit the register 4 times to align the I/O */ NULL); if (!status) { if (buffer[0] == 0) { *pCreditSize = 256; } else { *pCreditSize = buffer[0]; } } return status; } void DevNotifyGMboxTargetFailure(struct ar6k_device *pDev) { /* Target ASSERTED!!! */ if (pDev->GMboxInfo.pTargetFailureCallback != NULL) { pDev->GMboxInfo.pTargetFailureCallback(pDev->GMboxInfo.pProtocolContext, A_HARDWARE); } } int DevGMboxRecvLookAheadPeek(struct ar6k_device *pDev, u8 *pLookAheadBuffer, int *pLookAheadBytes) { int status = 0; struct ar6k_irq_proc_registers procRegs; int maxCopy; do { /* on entry the caller provides the length of the lookahead buffer */ if (*pLookAheadBytes > sizeof(procRegs.rx_gmbox_lookahead_alias)) { A_ASSERT(false); status = A_EINVAL; break; } maxCopy = *pLookAheadBytes; *pLookAheadBytes = 0; /* load the register table from the device */ status = HIFReadWrite(pDev->HIFDevice, HOST_INT_STATUS_ADDRESS, (u8 *)&procRegs, AR6K_IRQ_PROC_REGS_SIZE, HIF_RD_SYNC_BYTE_INC, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("DevGMboxRecvLookAheadPeek : Failed to read register table (%d) \n",status)); break; } if (procRegs.gmbox_rx_avail > 0) { int bytes = procRegs.gmbox_rx_avail > maxCopy ? 
maxCopy : procRegs.gmbox_rx_avail; memcpy(pLookAheadBuffer,&procRegs.rx_gmbox_lookahead_alias[0],bytes); *pLookAheadBytes = bytes; } } while (false); return status; } int DevGMboxSetTargetInterrupt(struct ar6k_device *pDev, int Signal, int AckTimeoutMS) { int status = 0; int i; u8 buffer[4]; A_MEMZERO(buffer, sizeof(buffer)); do { if (Signal >= MBOX_SIG_HCI_BRIDGE_MAX) { status = A_EINVAL; break; } /* set the last buffer to do the actual signal trigger */ buffer[3] = (1 << Signal); status = HIFReadWrite(pDev->HIFDevice, INT_WLAN_ADDRESS, buffer, sizeof(buffer), HIF_WR_SYNC_BYTE_FIX, /* hit the register 4 times to align the I/O */ NULL); if (status) { break; } } while (false); if (!status) { /* now read back the register to see if the bit cleared */ while (AckTimeoutMS) { status = HIFReadWrite(pDev->HIFDevice, INT_WLAN_ADDRESS, buffer, sizeof(buffer), HIF_RD_SYNC_BYTE_FIX, NULL); if (status) { break; } for (i = 0; i < sizeof(buffer); i++) { if (buffer[i] & (1 << Signal)) { /* bit is still set */ break; } } if (i >= sizeof(buffer)) { /* done */ break; } AckTimeoutMS--; A_MDELAY(1); } if (0 == AckTimeoutMS) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("DevGMboxSetTargetInterrupt : Ack Timed-out (sig:%d) \n",Signal)); status = A_ERROR; } } return status; } #endif //ATH_AR6K_ENABLE_GMBOX
gpl-2.0
zombi-x/android_kernel_oppo_msm8974
arch/arm/mach-msm/dal_remotetest.c
3319
11008
/* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * DAL remote test device test suite. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/debugfs.h> #include "dal_remotetest.h" #define BYTEBUF_LEN 64 #define rpc_error(num) \ do { \ errmask |= (1 << num); \ printk(KERN_INFO "%s: remote_unittest_%d failed (%d)\n", \ __func__, num, ret); \ } while (0) #define verify_error(num, field) \ do { \ errmask |= (1 << num); \ printk(KERN_INFO "%s: remote_unittest_%d failed (%s)\n", \ __func__, num, field); \ } while (0) static struct dentry *debugfs_dir_entry; static struct dentry *debugfs_modem_entry; static struct dentry *debugfs_dsp_entry; static uint8_t in_bytebuf[BYTEBUF_LEN]; static uint8_t out_bytebuf[BYTEBUF_LEN]; static uint8_t out_bytebuf2[BYTEBUF_LEN]; static struct remote_test_data in_data; static struct remote_test_data out_data; static int block_until_cb = 1; static void init_data(struct remote_test_data *data) { int i; data->regular_event = REMOTE_UNITTEST_INPUT_HANDLE; data->payload_event = REMOTE_UNITTEST_INPUT_HANDLE; for (i = 0; i < 32; i++) data->test[i] = i; } static int verify_data(struct remote_test_data *data) { int i; if (data->regular_event != REMOTE_UNITTEST_INPUT_HANDLE || data->payload_event != REMOTE_UNITTEST_INPUT_HANDLE) return -1; for (i = 0; i < 32; i++) if (data->test[i] != i) return -1; return 0; } static int verify_uint32_buffer(uint32_t *buf) { int i; for (i = 0; i < 32; i++) if (buf[i] != i) return 
-1; return 0; } static void init_bytebuf(uint8_t *bytebuf) { int i; for (i = 0; i < BYTEBUF_LEN; i++) bytebuf[i] = i & 0xff; } static int verify_bytebuf(uint8_t *bytebuf) { int i; for (i = 0; i < BYTEBUF_LEN; i++) if (bytebuf[i] != (i & 0xff)) return -1; return 0; } static void test_cb(void *context, uint32_t param, void *data, uint32_t len) { block_until_cb = 0; } static int remotetest_exec(int dest, u64 *val) { void *dev_handle; void *event_handles[3]; void *cb_handle; int ret; u64 errmask = 0; uint32_t ouint; uint32_t oalen; /* test daldevice_attach */ ret = daldevice_attach(REMOTE_UNITTEST_DEVICEID, NULL, dest, &dev_handle); if (ret) { printk(KERN_INFO "%s: failed to attach (%d)\n", __func__, ret); *val = 0xffffffff; return 0; } /* test remote_unittest_0 */ ret = remote_unittest_0(dev_handle, REMOTE_UNITTEST_INARG_1); if (ret) rpc_error(0); /* test remote_unittest_1 */ ret = remote_unittest_1(dev_handle, REMOTE_UNITTEST_INARG_1, REMOTE_UNITTEST_INARG_2); if (ret) rpc_error(1); /* test remote_unittest_2 */ ouint = 0; ret = remote_unittest_2(dev_handle, REMOTE_UNITTEST_INARG_1, &ouint); if (ret) rpc_error(2); else if (ouint != REMOTE_UNITTEST_OUTARG_1) verify_error(2, "ouint"); /* test remote_unittest_3 */ ret = remote_unittest_3(dev_handle, REMOTE_UNITTEST_INARG_1, REMOTE_UNITTEST_INARG_2, REMOTE_UNITTEST_INARG_3); if (ret) rpc_error(3); /* test remote_unittest_4 */ ouint = 0; ret = remote_unittest_4(dev_handle, REMOTE_UNITTEST_INARG_1, REMOTE_UNITTEST_INARG_2, &ouint); if (ret) rpc_error(4); else if (ouint != REMOTE_UNITTEST_OUTARG_1) verify_error(4, "ouint"); /* test remote_unittest_5 */ init_data(&in_data); ret = remote_unittest_5(dev_handle, &in_data, sizeof(in_data)); if (ret) rpc_error(5); /* test remote_unittest_6 */ init_data(&in_data); ret = remote_unittest_6(dev_handle, REMOTE_UNITTEST_INARG_1, &in_data.test, sizeof(in_data.test)); if (ret) rpc_error(6); /* test remote_unittest_7 */ init_data(&in_data); memset(&out_data, 0, sizeof(out_data)); ret = 
remote_unittest_7(dev_handle, &in_data, sizeof(in_data), &out_data.test, sizeof(out_data.test), &oalen); if (ret) rpc_error(7); else if (oalen != sizeof(out_data.test)) verify_error(7, "oalen"); else if (verify_uint32_buffer(out_data.test)) verify_error(7, "obuf"); /* test remote_unittest_8 */ init_bytebuf(in_bytebuf); memset(&out_data, 0, sizeof(out_data)); ret = remote_unittest_8(dev_handle, in_bytebuf, sizeof(in_bytebuf), &out_data, sizeof(out_data)); if (ret) rpc_error(8); else if (verify_data(&out_data)) verify_error(8, "obuf"); /* test remote_unittest_9 */ memset(&out_bytebuf, 0, sizeof(out_bytebuf)); ret = remote_unittest_9(dev_handle, out_bytebuf, sizeof(out_bytebuf)); if (ret) rpc_error(9); else if (verify_bytebuf(out_bytebuf)) verify_error(9, "obuf"); /* test remote_unittest_10 */ init_bytebuf(in_bytebuf); memset(&out_bytebuf, 0, sizeof(out_bytebuf)); ret = remote_unittest_10(dev_handle, REMOTE_UNITTEST_INARG_1, in_bytebuf, sizeof(in_bytebuf), out_bytebuf, sizeof(out_bytebuf), &oalen); if (ret) rpc_error(10); else if (oalen != sizeof(out_bytebuf)) verify_error(10, "oalen"); else if (verify_bytebuf(out_bytebuf)) verify_error(10, "obuf"); /* test remote_unittest_11 */ memset(&out_bytebuf, 0, sizeof(out_bytebuf)); ret = remote_unittest_11(dev_handle, REMOTE_UNITTEST_INARG_1, out_bytebuf, sizeof(out_bytebuf)); if (ret) rpc_error(11); else if (verify_bytebuf(out_bytebuf)) verify_error(11, "obuf"); /* test remote_unittest_12 */ memset(&out_bytebuf, 0, sizeof(out_bytebuf)); ret = remote_unittest_12(dev_handle, REMOTE_UNITTEST_INARG_1, out_bytebuf, sizeof(out_bytebuf), &oalen); if (ret) rpc_error(12); else if (oalen != sizeof(out_bytebuf)) verify_error(12, "oalen"); else if (verify_bytebuf(out_bytebuf)) verify_error(12, "obuf"); /* test remote_unittest_13 */ init_data(&in_data); memset(&out_data, 0, sizeof(out_data)); ret = remote_unittest_13(dev_handle, in_data.test, sizeof(in_data.test), &in_data, sizeof(in_data), &out_data, sizeof(out_data)); if (ret) 
rpc_error(13); else if (verify_data(&out_data)) verify_error(13, "obuf"); /* test remote_unittest_14 */ init_data(&in_data); memset(out_bytebuf, 0, sizeof(out_bytebuf)); memset(out_bytebuf2, 0, sizeof(out_bytebuf2)); ret = remote_unittest_14(dev_handle, in_data.test, sizeof(in_data.test), out_bytebuf, sizeof(out_bytebuf), out_bytebuf2, sizeof(out_bytebuf2), &oalen); if (ret) rpc_error(14); else if (verify_bytebuf(out_bytebuf)) verify_error(14, "obuf"); else if (oalen != sizeof(out_bytebuf2)) verify_error(14, "oalen"); else if (verify_bytebuf(out_bytebuf2)) verify_error(14, "obuf2"); /* test remote_unittest_15 */ init_data(&in_data); memset(out_bytebuf, 0, sizeof(out_bytebuf)); memset(&out_data, 0, sizeof(out_data)); ret = remote_unittest_15(dev_handle, in_data.test, sizeof(in_data.test), &in_data, sizeof(in_data), &out_data, sizeof(out_data), &oalen, out_bytebuf, sizeof(out_bytebuf)); if (ret) rpc_error(15); else if (oalen != sizeof(out_data)) verify_error(15, "oalen"); else if (verify_bytebuf(out_bytebuf)) verify_error(15, "obuf"); else if (verify_data(&out_data)) verify_error(15, "obuf2"); /* test setting up asynch events */ event_handles[0] = dalrpc_alloc_event(dev_handle); event_handles[1] = dalrpc_alloc_event(dev_handle); event_handles[2] = dalrpc_alloc_event(dev_handle); cb_handle = dalrpc_alloc_cb(dev_handle, test_cb, &out_data); in_data.regular_event = (uint32_t)event_handles[2]; in_data.payload_event = (uint32_t)cb_handle; ret = remote_unittest_eventcfg(dev_handle, &in_data, sizeof(in_data)); if (ret) { errmask |= (1 << 16); printk(KERN_INFO "%s: failed to configure asynch (%d)\n", __func__, ret); } /* test event */ ret = remote_unittest_eventtrig(dev_handle, REMOTE_UNITTEST_REGULAR_EVENT); if (ret) { errmask |= (1 << 17); printk(KERN_INFO "%s: failed to trigger event (%d)\n", __func__, ret); } ret = dalrpc_event_wait(event_handles[2], 1000); if (ret) { errmask |= (1 << 18); printk(KERN_INFO "%s: failed to receive event (%d)\n", __func__, ret); } /* test 
event again */ ret = remote_unittest_eventtrig(dev_handle, REMOTE_UNITTEST_REGULAR_EVENT); if (ret) { errmask |= (1 << 19); printk(KERN_INFO "%s: failed to trigger event (%d)\n", __func__, ret); } ret = dalrpc_event_wait_multiple(3, event_handles, 1000); if (ret != 2) { errmask |= (1 << 20); printk(KERN_INFO "%s: failed to receive event (%d)\n", __func__, ret); } /* test callback */ ret = remote_unittest_eventtrig(dev_handle, REMOTE_UNITTEST_CALLBACK_EVENT); if (ret) { errmask |= (1 << 21); printk(KERN_INFO "%s: failed to trigger callback (%d)\n", __func__, ret); } else while (block_until_cb) ; dalrpc_dealloc_cb(dev_handle, cb_handle); dalrpc_dealloc_event(dev_handle, event_handles[0]); dalrpc_dealloc_event(dev_handle, event_handles[1]); dalrpc_dealloc_event(dev_handle, event_handles[2]); /* test daldevice_detach */ ret = daldevice_detach(dev_handle); if (ret) { errmask |= (1 << 22); printk(KERN_INFO "%s: failed to detach (%d)\n", __func__, ret); } printk(KERN_INFO "%s: remote_unittest complete\n", __func__); *val = errmask; return 0; } static int remotetest_modem_exec(void *data, u64 *val) { return remotetest_exec(DALRPC_DEST_MODEM, val); } static int remotetest_dsp_exec(void *data, u64 *val) { return remotetest_exec(DALRPC_DEST_QDSP, val); } DEFINE_SIMPLE_ATTRIBUTE(dal_modemtest_fops, remotetest_modem_exec, NULL, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(dal_dsptest_fops, remotetest_dsp_exec, NULL, "%llu\n"); static int __init remotetest_init(void) { debugfs_dir_entry = debugfs_create_dir("dal", 0); if (IS_ERR(debugfs_dir_entry)) return PTR_ERR(debugfs_dir_entry); debugfs_modem_entry = debugfs_create_file("modem_test", 0444, debugfs_dir_entry, NULL, &dal_modemtest_fops); if (IS_ERR(debugfs_modem_entry)) { debugfs_remove(debugfs_dir_entry); return PTR_ERR(debugfs_modem_entry); } debugfs_dsp_entry = debugfs_create_file("dsp_test", 0444, debugfs_dir_entry, NULL, &dal_dsptest_fops); if (IS_ERR(debugfs_dsp_entry)) { debugfs_remove(debugfs_modem_entry); 
debugfs_remove(debugfs_dir_entry); return PTR_ERR(debugfs_dsp_entry); } return 0; } static void __exit remotetest_exit(void) { debugfs_remove(debugfs_modem_entry); debugfs_remove(debugfs_dsp_entry); debugfs_remove(debugfs_dir_entry); } MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Test for DAL RPC"); MODULE_VERSION("1.0"); module_init(remotetest_init); module_exit(remotetest_exit);
gpl-2.0
chli/tripndroid-endeavoru-3.0
crypto/pcompress.c
4599
2540
/*
 * Cryptographic API.
 *
 * Partial (de)compression operations.
 *
 * Copyright 2008 Sony Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 * If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#include <crypto/compress.h>
#include <crypto/internal/compress.h>

#include "internal.h"

/* pcomp transforms need no type-specific initialisation. */
static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	return 0;
}

/* Context size is exactly what the algorithm declared. */
static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
{
	return alg->cra_ctxsize;
}

/* Nothing to set up per transform instance either. */
static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));

/* Emit the /proc/crypto line identifying this algorithm as pcomp type. */
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : pcomp\n");
}

static const struct crypto_type crypto_pcomp_type = {
	.extsize	= crypto_pcomp_extsize,
	.init		= crypto_pcomp_init,
	.init_tfm	= crypto_pcomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show		= crypto_pcomp_show,
#endif
	.maskclear	= ~CRYPTO_ALG_TYPE_MASK,
	.maskset	= CRYPTO_ALG_TYPE_MASK,
	.type		= CRYPTO_ALG_TYPE_PCOMPRESS,
	.tfmsize	= offsetof(struct crypto_pcomp, base),
};

/* Allocate a pcomp transform by algorithm name. */
struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);

/* Register 'alg' with the crypto core, stamping it as a pcomp algorithm. */
int crypto_register_pcomp(struct pcomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_pcomp_type;
	/* Replace any previous type bits with PCOMPRESS in one step. */
	base->cra_flags = (base->cra_flags & ~CRYPTO_ALG_TYPE_MASK) |
			  CRYPTO_ALG_TYPE_PCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_pcomp);

/* Remove a previously registered pcomp algorithm. */
int crypto_unregister_pcomp(struct pcomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Partial (de)compression type");
MODULE_AUTHOR("Sony Corporation");
gpl-2.0
blitzmohit/dragonboard-rtlinux-3.4
drivers/net/ethernet/ethoc.c
4855
29614
/* * linux/drivers/net/ethernet/ethoc.c * * Copyright (C) 2007-2008 Avionic Design Development GmbH * Copyright (C) 2008-2009 Avionic Design GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Written by Thierry Reding <thierry.reding@avionic-design.de> */ #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/module.h> #include <net/ethoc.h> static int buffer_size = 0x8000; /* 32 KBytes */ module_param(buffer_size, int, 0); MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); /* register offsets */ #define MODER 0x00 #define INT_SOURCE 0x04 #define INT_MASK 0x08 #define IPGT 0x0c #define IPGR1 0x10 #define IPGR2 0x14 #define PACKETLEN 0x18 #define COLLCONF 0x1c #define TX_BD_NUM 0x20 #define CTRLMODER 0x24 #define MIIMODER 0x28 #define MIICOMMAND 0x2c #define MIIADDRESS 0x30 #define MIITX_DATA 0x34 #define MIIRX_DATA 0x38 #define MIISTATUS 0x3c #define MAC_ADDR0 0x40 #define MAC_ADDR1 0x44 #define ETH_HASH0 0x48 #define ETH_HASH1 0x4c #define ETH_TXCTRL 0x50 /* mode register */ #define MODER_RXEN (1 << 0) /* receive enable */ #define MODER_TXEN (1 << 1) /* transmit enable */ #define MODER_NOPRE (1 << 2) /* no preamble */ #define MODER_BRO (1 << 3) /* broadcast address */ #define MODER_IAM (1 << 4) /* individual address mode */ #define MODER_PRO (1 << 5) /* promiscuous mode */ #define MODER_IFG (1 << 6) /* interframe gap for incoming frames */ #define MODER_LOOP (1 << 7) /* loopback */ #define MODER_NBO (1 << 8) /* no back-off */ #define MODER_EDE (1 << 9) /* excess defer enable */ #define MODER_FULLD (1 << 10) /* full duplex */ #define MODER_RESET (1 << 11) /* FIXME: 
reset (undocumented) */ #define MODER_DCRC (1 << 12) /* delayed CRC enable */ #define MODER_CRC (1 << 13) /* CRC enable */ #define MODER_HUGE (1 << 14) /* huge packets enable */ #define MODER_PAD (1 << 15) /* padding enabled */ #define MODER_RSM (1 << 16) /* receive small packets */ /* interrupt source and mask registers */ #define INT_MASK_TXF (1 << 0) /* transmit frame */ #define INT_MASK_TXE (1 << 1) /* transmit error */ #define INT_MASK_RXF (1 << 2) /* receive frame */ #define INT_MASK_RXE (1 << 3) /* receive error */ #define INT_MASK_BUSY (1 << 4) #define INT_MASK_TXC (1 << 5) /* transmit control frame */ #define INT_MASK_RXC (1 << 6) /* receive control frame */ #define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE) #define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE) #define INT_MASK_ALL ( \ INT_MASK_TXF | INT_MASK_TXE | \ INT_MASK_RXF | INT_MASK_RXE | \ INT_MASK_TXC | INT_MASK_RXC | \ INT_MASK_BUSY \ ) /* packet length register */ #define PACKETLEN_MIN(min) (((min) & 0xffff) << 16) #define PACKETLEN_MAX(max) (((max) & 0xffff) << 0) #define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \ PACKETLEN_MAX(max)) /* transmit buffer number register */ #define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? 
(x) : 0x80) /* control module mode register */ #define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */ #define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */ #define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */ /* MII mode register */ #define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */ #define MIIMODER_NOPRE (1 << 8) /* no preamble */ /* MII command register */ #define MIICOMMAND_SCAN (1 << 0) /* scan status */ #define MIICOMMAND_READ (1 << 1) /* read status */ #define MIICOMMAND_WRITE (1 << 2) /* write control data */ /* MII address register */ #define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0) #define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8) #define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \ MIIADDRESS_RGAD(reg)) /* MII transmit data register */ #define MIITX_DATA_VAL(x) ((x) & 0xffff) /* MII receive data register */ #define MIIRX_DATA_VAL(x) ((x) & 0xffff) /* MII status register */ #define MIISTATUS_LINKFAIL (1 << 0) #define MIISTATUS_BUSY (1 << 1) #define MIISTATUS_INVALID (1 << 2) /* TX buffer descriptor */ #define TX_BD_CS (1 << 0) /* carrier sense lost */ #define TX_BD_DF (1 << 1) /* defer indication */ #define TX_BD_LC (1 << 2) /* late collision */ #define TX_BD_RL (1 << 3) /* retransmission limit */ #define TX_BD_RETRY_MASK (0x00f0) #define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4) #define TX_BD_UR (1 << 8) /* transmitter underrun */ #define TX_BD_CRC (1 << 11) /* TX CRC enable */ #define TX_BD_PAD (1 << 12) /* pad enable for short packets */ #define TX_BD_WRAP (1 << 13) #define TX_BD_IRQ (1 << 14) /* interrupt request enable */ #define TX_BD_READY (1 << 15) /* TX buffer ready */ #define TX_BD_LEN(x) (((x) & 0xffff) << 16) #define TX_BD_LEN_MASK (0xffff << 16) #define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \ TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR) /* RX buffer descriptor */ #define RX_BD_LC (1 << 0) /* late collision */ #define RX_BD_CRC (1 << 1) /* RX CRC error */ #define RX_BD_SF (1 << 2) /* short frame 
*/ #define RX_BD_TL (1 << 3) /* too long */ #define RX_BD_DN (1 << 4) /* dribble nibble */ #define RX_BD_IS (1 << 5) /* invalid symbol */ #define RX_BD_OR (1 << 6) /* receiver overrun */ #define RX_BD_MISS (1 << 7) #define RX_BD_CF (1 << 8) /* control frame */ #define RX_BD_WRAP (1 << 13) #define RX_BD_IRQ (1 << 14) /* interrupt request enable */ #define RX_BD_EMPTY (1 << 15) #define RX_BD_LEN(x) (((x) & 0xffff) << 16) #define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \ RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS) #define ETHOC_BUFSIZ 1536 #define ETHOC_ZLEN 64 #define ETHOC_BD_BASE 0x400 #define ETHOC_TIMEOUT (HZ / 2) #define ETHOC_MII_TIMEOUT (1 + (HZ / 5)) /** * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region * @dma_alloc: dma allocated buffer size * @io_region_size: I/O memory region size * @num_tx: number of send buffers * @cur_tx: last send buffer written * @dty_tx: last buffer actually sent * @num_rx: number of receive buffers * @cur_rx: current receive buffer * @vma: pointer to array of virtual memory addresses for buffers * @netdev: pointer to network device structure * @napi: NAPI structure * @msg_enable: device state flags * @lock: device lock * @phy: attached PHY * @mdio: MDIO bus for PHY access * @phy_id: address of attached PHY */ struct ethoc { void __iomem *iobase; void __iomem *membase; int dma_alloc; resource_size_t io_region_size; unsigned int num_tx; unsigned int cur_tx; unsigned int dty_tx; unsigned int num_rx; unsigned int cur_rx; void** vma; struct net_device *netdev; struct napi_struct napi; u32 msg_enable; spinlock_t lock; struct phy_device *phy; struct mii_bus *mdio; s8 phy_id; }; /** * struct ethoc_bd - buffer descriptor * @stat: buffer statistics * @addr: physical memory address */ struct ethoc_bd { u32 stat; u32 addr; }; static inline u32 ethoc_read(struct ethoc *dev, loff_t offset) { return ioread32(dev->iobase + offset); } static inline 
void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
	iowrite32(data, dev->iobase + offset);
}

/* Read one buffer descriptor (status + address words) from descriptor RAM. */
static inline void ethoc_read_bd(struct ethoc *dev, int index,
		struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	bd->stat = ethoc_read(dev, offset + 0);
	bd->addr = ethoc_read(dev, offset + 4);
}

/* Write one buffer descriptor back into descriptor RAM. */
static inline void ethoc_write_bd(struct ethoc *dev, int index,
		const struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(dev, offset + 0, bd->stat);
	ethoc_write(dev, offset + 4, bd->addr);
}

/* Unmask the interrupt sources in @mask (read-modify-write of INT_MASK). */
static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask |= mask;
	ethoc_write(dev, INT_MASK, imask);
}

/* Mask the interrupt sources in @mask. */
static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask &= ~mask;
	ethoc_write(dev, INT_MASK, imask);
}

/* Acknowledge pending interrupt sources by writing them back to INT_SOURCE. */
static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_SOURCE, mask);
}

/* Set the RX/TX enable bits in the mode register. */
static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(dev, MODER, mode);
}

/* Clear the RX/TX enable bits in the mode register. */
static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(dev, MODER, mode);
}

/*
 * Program the TX then RX descriptor rings, carving the packet buffers out of
 * the region starting at @mem_start in ETHOC_BUFSIZ chunks.  dev->vma[] keeps
 * the matching CPU-side addresses (one entry per descriptor, TX first).
 */
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
	struct ethoc_bd bd;
	int i;
	void* vma;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;
	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = mem_start;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	vma = dev->membase;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;	/* last descriptor closes the ring */

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	/* RX descriptors follow the TX ones in descriptor RAM */
	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr +=
ETHOC_BUFSIZ;

		dev->vma[dev->num_tx + i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	return 0;
}

/*
 * Bring the MAC into a known operating state: FCS generation, automatic
 * padding and full-duplex on, default inter-packet gap, all interrupt
 * sources acknowledged and enabled, then RX/TX enabled.
 */
static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);

	/* default inter-packet gap */
	ethoc_write(dev, IPGT, 0x15);

	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}

/*
 * Account the RX errors flagged in a descriptor's status word.  Returns the
 * number of "hard" errors found; the caller drops the frame when this is
 * non-zero (a dribble nibble or missed frame alone does not count).
 */
static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
		struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;
	unsigned int ret = 0;

	if (bd->stat & RX_BD_TL) {
		dev_err(&netdev->dev, "RX: frame too long\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		dev_err(&netdev->dev, "RX: frame too short\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_DN) {
		dev_err(&netdev->dev, "RX: dribble nibble\n");
		netdev->stats.rx_frame_errors++;
	}

	if (bd->stat & RX_BD_CRC) {
		dev_err(&netdev->dev, "RX: wrong CRC\n");
		netdev->stats.rx_crc_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		dev_err(&netdev->dev, "RX: overrun\n");
		netdev->stats.rx_over_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_MISS)
		netdev->stats.rx_missed_errors++;

	if (bd->stat & RX_BD_LC) {
		dev_err(&netdev->dev, "RX: late collision\n");
		netdev->stats.collisions++;
		ret++;
	}

	return ret;
}

/* NAPI receive path: drain up to @limit frames from the RX ring. */
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);

			/* If packet (interrupt) came in between checking
			 * BD_EMTPY and clearing the interrupt source, then we
			 * risk missing the packet as the RX interrupt won't
			 *
trigger right away when we reenable it; hence, check * BD_EMTPY here again to make sure there isn't such a * packet waiting for us... */ ethoc_read_bd(priv, entry, &bd); if (bd.stat & RX_BD_EMPTY) break; } if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; struct sk_buff *skb; size -= 4; /* strip the CRC */ skb = netdev_alloc_skb_ip_align(dev, size); if (likely(skb)) { void *src = priv->vma[entry]; memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += size; netif_receive_skb(skb); } else { if (net_ratelimit()) dev_warn(&dev->dev, "low on memory - " "packet dropped\n"); dev->stats.rx_dropped++; break; } } /* clear the buffer descriptor so it can be reused */ bd.stat &= ~RX_BD_STATS; bd.stat |= RX_BD_EMPTY; ethoc_write_bd(priv, entry, &bd); if (++priv->cur_rx == priv->num_rx) priv->cur_rx = 0; } return count; } static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) { struct net_device *netdev = dev->netdev; if (bd->stat & TX_BD_LC) { dev_err(&netdev->dev, "TX: late collision\n"); netdev->stats.tx_window_errors++; } if (bd->stat & TX_BD_RL) { dev_err(&netdev->dev, "TX: retransmit limit\n"); netdev->stats.tx_aborted_errors++; } if (bd->stat & TX_BD_UR) { dev_err(&netdev->dev, "TX: underrun\n"); netdev->stats.tx_fifo_errors++; } if (bd->stat & TX_BD_CS) { dev_err(&netdev->dev, "TX: carrier sense lost\n"); netdev->stats.tx_carrier_errors++; } if (bd->stat & TX_BD_STATS) netdev->stats.tx_errors++; netdev->stats.collisions += (bd->stat >> 4) & 0xf; netdev->stats.tx_bytes += bd->stat >> 16; netdev->stats.tx_packets++; } static int ethoc_tx(struct net_device *dev, int limit) { struct ethoc *priv = netdev_priv(dev); int count; struct ethoc_bd bd; for (count = 0; count < limit; ++count) { unsigned int entry; entry = priv->dty_tx & (priv->num_tx-1); ethoc_read_bd(priv, entry, &bd); if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) { 
ethoc_ack_irq(priv, INT_MASK_TX); /* If interrupt came in between reading in the BD * and clearing the interrupt source, then we risk * missing the event as the TX interrupt won't trigger * right away when we reenable it; hence, check * BD_EMPTY here again to make sure there isn't such an * event pending... */ ethoc_read_bd(priv, entry, &bd); if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) break; } ethoc_update_tx_stats(priv, &bd); priv->dty_tx++; } if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) netif_wake_queue(dev); return count; } static irqreturn_t ethoc_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct ethoc *priv = netdev_priv(dev); u32 pending; u32 mask; /* Figure out what triggered the interrupt... * The tricky bit here is that the interrupt source bits get * set in INT_SOURCE for an event regardless of whether that * event is masked or not. Thus, in order to figure out what * triggered the interrupt, we need to remove the sources * for all events that are currently masked. This behaviour * is not particularly well documented but reasonable... 
*/ mask = ethoc_read(priv, INT_MASK); pending = ethoc_read(priv, INT_SOURCE); pending &= mask; if (unlikely(pending == 0)) { return IRQ_NONE; } ethoc_ack_irq(priv, pending); /* We always handle the dropped packet interrupt */ if (pending & INT_MASK_BUSY) { dev_err(&dev->dev, "packet dropped\n"); dev->stats.rx_dropped++; } /* Handle receive/transmit event by switching to polling */ if (pending & (INT_MASK_TX | INT_MASK_RX)) { ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX); napi_schedule(&priv->napi); } return IRQ_HANDLED; } static int ethoc_get_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; u32 reg; reg = ethoc_read(priv, MAC_ADDR0); mac[2] = (reg >> 24) & 0xff; mac[3] = (reg >> 16) & 0xff; mac[4] = (reg >> 8) & 0xff; mac[5] = (reg >> 0) & 0xff; reg = ethoc_read(priv, MAC_ADDR1); mac[0] = (reg >> 8) & 0xff; mac[1] = (reg >> 0) & 0xff; return 0; } static int ethoc_poll(struct napi_struct *napi, int budget) { struct ethoc *priv = container_of(napi, struct ethoc, napi); int rx_work_done = 0; int tx_work_done = 0; rx_work_done = ethoc_rx(priv->netdev, budget); tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { napi_complete(napi); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } return rx_work_done; } static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) { struct ethoc *priv = bus->priv; int i; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); for (i=0; i < 5; i++) { u32 status = ethoc_read(priv, MIISTATUS); if (!(status & MIISTATUS_BUSY)) { u32 data = ethoc_read(priv, MIIRX_DATA); /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return data; } usleep_range(100,200); } return -EBUSY; } static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct ethoc *priv = bus->priv; int i; ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 
ethoc_write(priv, MIITX_DATA, val); ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); for (i=0; i < 5; i++) { u32 stat = ethoc_read(priv, MIISTATUS); if (!(stat & MIISTATUS_BUSY)) { /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return 0; } usleep_range(100,200); } return -EBUSY; } static int ethoc_mdio_reset(struct mii_bus *bus) { return 0; } static void ethoc_mdio_poll(struct net_device *dev) { } static int __devinit ethoc_mdio_probe(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phy; int err; if (priv->phy_id != -1) { phy = priv->mdio->phy_map[priv->phy_id]; } else { phy = phy_find_first(priv->mdio); } if (!phy) { dev_err(&dev->dev, "no PHY found\n"); return -ENXIO; } err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0, PHY_INTERFACE_MODE_GMII); if (err) { dev_err(&dev->dev, "could not attach to PHY\n"); return err; } priv->phy = phy; return 0; } static int ethoc_open(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); int ret; ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; ethoc_init_ring(priv, dev->mem_start); ethoc_reset(priv); if (netif_queue_stopped(dev)) { dev_dbg(&dev->dev, " resuming queue\n"); netif_wake_queue(dev); } else { dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); } phy_start(priv->phy); napi_enable(&priv->napi); if (netif_msg_ifup(priv)) { dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", dev->base_addr, dev->mem_start, dev->mem_end); } return 0; } static int ethoc_stop(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); napi_disable(&priv->napi); if (priv->phy) phy_stop(priv->phy); ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); return 0; } static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ethoc *priv = netdev_priv(dev); struct mii_ioctl_data *mdio = if_mii(ifr); struct phy_device *phy = NULL; if 
(!netif_running(dev)) return -EINVAL; if (cmd != SIOCGMIIPHY) { if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; phy = priv->mdio->phy_map[mdio->phy_id]; if (!phy) return -ENODEV; } else { phy = priv->phy; } return phy_mii_ioctl(phy, ifr, cmd); } static int ethoc_config(struct net_device *dev, struct ifmap *map) { return -ENOSYS; } static int ethoc_set_mac_address(struct net_device *dev, void *addr) { struct ethoc *priv = netdev_priv(dev); u8 *mac = (u8 *)addr; if (!is_valid_ether_addr(mac)) return -EADDRNOTAVAIL; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); memcpy(dev->dev_addr, mac, ETH_ALEN); dev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } static void ethoc_set_multicast_list(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 mode = ethoc_read(priv, MODER); struct netdev_hw_addr *ha; u32 hash[2] = { 0, 0 }; /* set loopback mode if requested */ if (dev->flags & IFF_LOOPBACK) mode |= MODER_LOOP; else mode &= ~MODER_LOOP; /* receive broadcast frames if requested */ if (dev->flags & IFF_BROADCAST) mode &= ~MODER_BRO; else mode |= MODER_BRO; /* enable promiscuous mode if requested */ if (dev->flags & IFF_PROMISC) mode |= MODER_PRO; else mode &= ~MODER_PRO; ethoc_write(priv, MODER, mode); /* receive multicast frames */ if (dev->flags & IFF_ALLMULTI) { hash[0] = 0xffffffff; hash[1] = 0xffffffff; } else { netdev_for_each_mc_addr(ha, dev) { u32 crc = ether_crc(ETH_ALEN, ha->addr); int bit = (crc >> 26) & 0x3f; hash[bit >> 5] |= 1 << (bit & 0x1f); } } ethoc_write(priv, ETH_HASH0, hash[0]); ethoc_write(priv, ETH_HASH1, hash[1]); } static int ethoc_change_mtu(struct net_device *dev, int new_mtu) { return -ENOSYS; } static void ethoc_tx_timeout(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 pending = ethoc_read(priv, INT_SOURCE); if (likely(pending)) ethoc_interrupt(dev->irq, dev); } static netdev_tx_t 
ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct ethoc_bd bd; unsigned int entry; void *dest; if (unlikely(skb->len > ETHOC_BUFSIZ)) { dev->stats.tx_errors++; goto out; } entry = priv->cur_tx % priv->num_tx; spin_lock_irq(&priv->lock); priv->cur_tx++; ethoc_read_bd(priv, entry, &bd); if (unlikely(skb->len < ETHOC_ZLEN)) bd.stat |= TX_BD_PAD; else bd.stat &= ~TX_BD_PAD; dest = priv->vma[entry]; memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); bd.stat |= TX_BD_LEN(skb->len); ethoc_write_bd(priv, entry, &bd); bd.stat |= TX_BD_READY; ethoc_write_bd(priv, entry, &bd); if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { dev_dbg(&dev->dev, "stopping queue\n"); netif_stop_queue(dev); } spin_unlock_irq(&priv->lock); skb_tx_timestamp(skb); out: dev_kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops ethoc_netdev_ops = { .ndo_open = ethoc_open, .ndo_stop = ethoc_stop, .ndo_do_ioctl = ethoc_ioctl, .ndo_set_config = ethoc_config, .ndo_set_mac_address = ethoc_set_mac_address, .ndo_set_rx_mode = ethoc_set_multicast_list, .ndo_change_mtu = ethoc_change_mtu, .ndo_tx_timeout = ethoc_tx_timeout, .ndo_start_xmit = ethoc_start_xmit, }; /** * ethoc_probe() - initialize OpenCores ethernet MAC * pdev: platform device */ static int __devinit ethoc_probe(struct platform_device *pdev) { struct net_device *netdev = NULL; struct resource *res = NULL; struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; unsigned int phy; int num_bd; int ret = 0; bool random_mac = false; /* allocate networking device */ netdev = alloc_etherdev(sizeof(struct ethoc)); if (!netdev) { ret = -ENOMEM; goto out; } SET_NETDEV_DEV(netdev, &pdev->dev); platform_set_drvdata(pdev, netdev); /* obtain I/O memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory space\n"); ret = -ENXIO; goto free; } mmio = 
devm_request_mem_region(&pdev->dev, res->start, resource_size(res), res->name); if (!mmio) { dev_err(&pdev->dev, "cannot request I/O memory space\n"); ret = -ENXIO; goto free; } netdev->base_addr = mmio->start; /* obtain buffer memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), res->name); if (!mem) { dev_err(&pdev->dev, "cannot request memory space\n"); ret = -ENXIO; goto free; } netdev->mem_start = mem->start; netdev->mem_end = mem->end; } /* obtain device IRQ number */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain IRQ\n"); ret = -ENXIO; goto free; } netdev->irq = res->start; /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; priv->dma_alloc = 0; priv->io_region_size = resource_size(mmio); priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; goto error; } if (netdev->mem_end) { priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, resource_size(mem)); if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; goto error; } } else { /* Allocate buffer memory */ priv->membase = dmam_alloc_coherent(&pdev->dev, buffer_size, (void *)&netdev->mem_start, GFP_KERNEL); if (!priv->membase) { dev_err(&pdev->dev, "cannot allocate %dB buffer\n", buffer_size); ret = -ENOMEM; goto error; } netdev->mem_end = netdev->mem_start + buffer_size; priv->dma_alloc = buffer_size; } /* calculate the number of TX/RX buffers, maximum 128 supported */ num_bd = min_t(unsigned int, 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); if (num_bd < 4) { ret = -ENODEV; goto error; } /* num_tx must be a power of two */ priv->num_tx = rounddown_pow_of_two(num_bd >> 1); priv->num_rx = num_bd - priv->num_tx; dev_dbg(&pdev->dev, "ethoc: 
num_tx: %d num_rx: %d\n", priv->num_tx, priv->num_rx); priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; goto error; } /* Allow the platform setup code to pass in a MAC address. */ if (pdev->dev.platform_data) { struct ethoc_platform_data *pdata = pdev->dev.platform_data; memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); priv->phy_id = pdata->phy_id; } else { priv->phy_id = -1; #ifdef CONFIG_OF { const uint8_t* mac; mac = of_get_property(pdev->dev.of_node, "local-mac-address", NULL); if (mac) memcpy(netdev->dev_addr, mac, IFHWADDRLEN); } #endif } /* Check that the given MAC address is valid. If it isn't, read the * current MAC from the controller. */ if (!is_valid_ether_addr(netdev->dev_addr)) ethoc_get_mac_address(netdev, netdev->dev_addr); /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ if (!is_valid_ether_addr(netdev->dev_addr)) { random_ether_addr(netdev->dev_addr); random_mac = true; } ret = ethoc_set_mac_address(netdev, netdev->dev_addr); if (ret) { dev_err(&netdev->dev, "failed to set MAC address\n"); goto error; } if (random_mac) netdev->addr_assign_type |= NET_ADDR_RANDOM; /* register MII bus */ priv->mdio = mdiobus_alloc(); if (!priv->mdio) { ret = -ENOMEM; goto free; } priv->mdio->name = "ethoc-mdio"; snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d", priv->mdio->name, pdev->id); priv->mdio->read = ethoc_mdio_read; priv->mdio->write = ethoc_mdio_write; priv->mdio->reset = ethoc_mdio_reset; priv->mdio->priv = priv; priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!priv->mdio->irq) { ret = -ENOMEM; goto free_mdio; } for (phy = 0; phy < PHY_MAX_ADDR; phy++) priv->mdio->irq[phy] = PHY_POLL; ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); goto free_mdio; } ret = ethoc_mdio_probe(netdev); if (ret) { dev_err(&netdev->dev, "failed to probe MDIO bus\n"); goto error; } 
ether_setup(netdev); /* setup the net_device structure */ netdev->netdev_ops = &ethoc_netdev_ops; netdev->watchdog_timeo = ETHOC_TIMEOUT; netdev->features |= 0; /* setup NAPI */ netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); spin_lock_init(&priv->lock); ret = register_netdev(netdev); if (ret < 0) { dev_err(&netdev->dev, "failed to register interface\n"); goto error2; } goto out; error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); free_mdio: kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: free_netdev(netdev); out: return ret; } /** * ethoc_remove() - shutdown OpenCores ethernet MAC * @pdev: platform device */ static int __devexit ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); platform_set_drvdata(pdev, NULL); if (netdev) { netif_napi_del(&priv->napi); phy_disconnect(priv->phy); priv->phy = NULL; if (priv->mdio) { mdiobus_unregister(priv->mdio); kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } unregister_netdev(netdev); free_netdev(netdev); } return 0; } #ifdef CONFIG_PM static int ethoc_suspend(struct platform_device *pdev, pm_message_t state) { return -ENOSYS; } static int ethoc_resume(struct platform_device *pdev) { return -ENOSYS; } #else # define ethoc_suspend NULL # define ethoc_resume NULL #endif static struct of_device_id ethoc_match[] = { { .compatible = "opencores,ethoc", }, {}, }; MODULE_DEVICE_TABLE(of, ethoc_match); static struct platform_driver ethoc_driver = { .probe = ethoc_probe, .remove = __devexit_p(ethoc_remove), .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { .name = "ethoc", .owner = THIS_MODULE, .of_match_table = ethoc_match, }, }; module_platform_driver(ethoc_driver); MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
MassStash/htc_jewel_kernel_sense
drivers/scsi/eata_pio.c
8183
27933
/************************************************************ * * * Linux EATA SCSI PIO driver * * * * based on the CAM document CAM/89-004 rev. 2.0c, * * DPT's driver kit, some internal documents and source, * * and several other Linux scsi drivers and kernel docs. * * * * The driver currently: * * -supports all EATA-PIO boards * * -only supports DASD devices * * * * (c)1993-96 Michael Neuffer, Alfred Arnold * * neuffer@goofy.zdv.uni-mainz.de * * a.arnold@kfa-juelich.de * * * * Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for * * Linux 2.5.x and the newer locking and error handling * * * * This program is free software; you can redistribute it * * and/or modify it under the terms of the GNU General * * Public License as published by the Free Software * * Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be * * useful, but WITHOUT ANY WARRANTY; without even the * * implied warranty of MERCHANTABILITY or FITNESS FOR A * * PARTICULAR PURPOSE. See the GNU General Public License * * for more details. * * * * You should have received a copy of the GNU General * * Public License along with this kernel; if not, write to * * the Free Software Foundation, Inc., 675 Mass Ave, * * Cambridge, MA 02139, USA. * * * * For the avoidance of doubt the "preferred form" of this * * code is one which is in an open non patent encumbered * * format. Where cryptographic key signing forms part of * * the process of creating an executable the information * * including keys needed to generate an equivalently * * functional executable are deemed to be part of the * * source code are deemed to be part of the source code. 
* * * ************************************************************ * last change: 2002/11/02 OS: Linux 2.5.45 * ************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "eata_generic.h" #include "eata_pio.h" static unsigned int ISAbases[MAXISA] = { 0x1F0, 0x170, 0x330, 0x230 }; static unsigned int ISAirqs[MAXISA] = { 14, 12, 15, 11 }; static unsigned char EISAbases[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; static unsigned int registered_HBAs; static struct Scsi_Host *last_HBA; static struct Scsi_Host *first_HBA; static unsigned char reg_IRQ[16]; static unsigned char reg_IRQL[16]; static unsigned long int_counter; static unsigned long queue_counter; static struct scsi_host_template driver_template; /* * eata_proc_info * inout : decides on the direction of the dataflow and the meaning of the * variables * buffer: If inout==FALSE data is being written to it else read from it * *start: If inout==FALSE start of the valid data in the buffer * offset: If inout==FALSE offset from the beginning of the imaginary file * from which we start writing into the buffer * length: If inout==FALSE max number of bytes to be written into the buffer * else number of bytes in the buffer */ static int eata_pio_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length, int rw) { int len = 0; off_t begin = 0, pos = 0; if (rw) return -ENOSYS; len += sprintf(buffer+len, "EATA (Extended Attachment) PIO driver version: " "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB); len += sprintf(buffer + len, "queued commands: %10ld\n" "processed 
interrupts:%10ld\n", queue_counter, int_counter); len += sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n", shost->host_no, SD(shost)->name); len += sprintf(buffer + len, "Firmware revision: v%s\n", SD(shost)->revision); len += sprintf(buffer + len, "IO: PIO\n"); len += sprintf(buffer + len, "Base IO : %#.4x\n", (u32) shost->base); len += sprintf(buffer + len, "Host Bus: %s\n", (SD(shost)->bustype == 'P')?"PCI ": (SD(shost)->bustype == 'E')?"EISA":"ISA "); pos = begin + len; if (pos < offset) { len = 0; begin = pos; } if (pos > offset + length) goto stop_output; stop_output: DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len)); *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); /* Start slop */ if (len > length) len = length; /* Ending slop */ DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len)); return len; } static int eata_pio_release(struct Scsi_Host *sh) { hostdata *hd = SD(sh); if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL); else reg_IRQ[sh->irq]--; if (SD(sh)->channel == 0) { if (sh->io_port && sh->n_io_port) release_region(sh->io_port, sh->n_io_port); } /* At this point the PCI reference can go */ if (hd->pdev) pci_dev_put(hd->pdev); return 1; } static void IncStat(struct scsi_pointer *SCp, unsigned int Increment) { SCp->ptr += Increment; if ((SCp->this_residual -= Increment) == 0) { if ((--SCp->buffers_residual) == 0) SCp->Status = 0; else { SCp->buffer++; SCp->ptr = sg_virt(SCp->buffer); SCp->this_residual = SCp->buffer->length; } } } static irqreturn_t eata_pio_int_handler(int irq, void *dev_id); static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *dev = dev_id; irqreturn_t ret; spin_lock_irqsave(dev->host_lock, flags); ret = eata_pio_int_handler(irq, dev_id); spin_unlock_irqrestore(dev->host_lock, flags); return ret; } static irqreturn_t eata_pio_int_handler(int irq, void *dev_id) { unsigned int eata_stat = 
0xfffff; struct scsi_cmnd *cmd; hostdata *hd; struct eata_ccb *cp; unsigned long base; unsigned int x, z; struct Scsi_Host *sh; unsigned short zwickel = 0; unsigned char stat, odd; irqreturn_t ret = IRQ_NONE; for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) { if (sh->irq != irq) continue; if (inb(sh->base + HA_RSTATUS) & HA_SBUSY) continue; int_counter++; ret = IRQ_HANDLED; hd = SD(sh); cp = &hd->ccb[0]; cmd = cp->cmd; base = cmd->device->host->base; do { stat = inb(base + HA_RSTATUS); if (stat & HA_SDRQ) { if (cp->DataIn) { z = 256; odd = 0; while ((cmd->SCp.Status) && ((z > 0) || (odd))) { if (odd) { *(cmd->SCp.ptr) = zwickel >> 8; IncStat(&cmd->SCp, 1); odd = 0; } x = min_t(unsigned int, z, cmd->SCp.this_residual / 2); insw(base + HA_RDATA, cmd->SCp.ptr, x); z -= x; IncStat(&cmd->SCp, 2 * x); if ((z > 0) && (cmd->SCp.this_residual == 1)) { zwickel = inw(base + HA_RDATA); *(cmd->SCp.ptr) = zwickel & 0xff; IncStat(&cmd->SCp, 1); z--; odd = 1; } } while (z > 0) { zwickel = inw(base + HA_RDATA); z--; } } else { /* cp->DataOut */ odd = 0; z = 256; while ((cmd->SCp.Status) && ((z > 0) || (odd))) { if (odd) { zwickel += *(cmd->SCp.ptr) << 8; IncStat(&cmd->SCp, 1); outw(zwickel, base + HA_RDATA); z--; odd = 0; } x = min_t(unsigned int, z, cmd->SCp.this_residual / 2); outsw(base + HA_RDATA, cmd->SCp.ptr, x); z -= x; IncStat(&cmd->SCp, 2 * x); if ((z > 0) && (cmd->SCp.this_residual == 1)) { zwickel = *(cmd->SCp.ptr); zwickel &= 0xff; IncStat(&cmd->SCp, 1); odd = 1; } } while (z > 0 || odd) { outw(zwickel, base + HA_RDATA); z--; odd = 0; } } } } while ((stat & HA_SDRQ) || ((stat & HA_SMORE) && hd->moresupport)); /* terminate handler if HBA goes busy again, i.e. 
transfers * more data */ if (stat & HA_SBUSY) break; /* OK, this is quite stupid, but I haven't found any correct * way to get HBA&SCSI status so far */ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { cmd->result = (DID_OK << 16); hd->devflags |= (1 << cp->cp_id); } else if (hd->devflags & (1 << cp->cp_id)) cmd->result = (DID_OK << 16) + 0x02; else cmd->result = (DID_NO_CONNECT << 16); if (cp->status == LOCKED) { cp->status = FREE; eata_stat = inb(base + HA_RSTATUS); printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n"); return ret; } #if DBG_INTR2 if (stat != 0x50) printk(KERN_DEBUG "stat: %#.2x, result: %#.8x\n", stat, cmd->result); #endif cp->status = FREE; /* now we can release the slot */ cmd->scsi_done(cmd); } return ret; } static inline unsigned int eata_pio_send_command(unsigned long base, unsigned char command) { unsigned int loop = 50; while (inb(base + HA_RSTATUS) & HA_SBUSY) if (--loop == 0) return 1; /* Enable interrupts for HBA. It is not the best way to do it at this * place, but I hope that it doesn't interfere with the IDE driver * initialization this way */ outb(HA_CTRL_8HEADS, base + HA_CTRLREG); outb(command, base + HA_WCOMMAND); return 0; } static int eata_pio_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { unsigned int x, y; unsigned long base; hostdata *hd; struct Scsi_Host *sh; struct eata_ccb *cp; queue_counter++; hd = HD(cmd); sh = cmd->device->host; base = sh->base; /* use only slot 0, as 2001 can handle only one cmd at a time */ y = x = 0; if (hd->ccb[y].status != FREE) { DBG(DBG_QUEUE, printk(KERN_EMERG "can_queue %d, x %d, y %d\n", sh->can_queue, x, y)); #if DEBUG_EATA panic(KERN_EMERG "eata_pio: run out of queue slots cmdno:%ld " "intrno: %ld\n", queue_counter, int_counter); #else panic(KERN_EMERG "eata_pio: run out of queue slots....\n"); #endif } cp = &hd->ccb[y]; memset(cp, 0, sizeof(struct eata_ccb)); cp->status = USED; /* claim free slot */ DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 
"eata_pio_queue 0x%p, y %d\n", cmd, y)); cmd->scsi_done = (void *) done; if (cmd->sc_data_direction == DMA_TO_DEVICE) cp->DataOut = 1; /* Output mode */ else cp->DataIn = 0; /* Input mode */ cp->Interpret = (cmd->device->id == hd->hostid); cp->cp_datalen = cpu_to_be32(scsi_bufflen(cmd)); cp->Auto_Req_Sen = 0; cp->cp_reqDMA = 0; cp->reqlen = 0; cp->cp_id = cmd->device->id; cp->cp_lun = cmd->device->lun; cp->cp_dispri = 0; cp->cp_identify = 1; memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); cp->cp_statDMA = 0; cp->cp_viraddr = cp; cp->cmd = cmd; cmd->host_scribble = (char *) &hd->ccb[y]; if (!scsi_bufflen(cmd)) { cmd->SCp.buffers_residual = 1; cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; cmd->SCp.buffer = NULL; } else { cmd->SCp.buffer = scsi_sglist(cmd); cmd->SCp.buffers_residual = scsi_sg_count(cmd); cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; } cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes * are to transfer */ if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) { cmd->result = DID_BUS_BUSY << 16; scmd_printk(KERN_NOTICE, cmd, "eata_pio_queue pid 0x%p, HBA busy, " "returning DID_BUS_BUSY, done.\n", cmd); done(cmd); cp->status = FREE; return 0; } /* FIXME: timeout */ while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) cpu_relax(); outsw(base + HA_RDATA, cp, hd->cplen); outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND); for (x = 0; x < hd->cppadlen; x++) outw(0, base + HA_RDATA); DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, "Queued base %#.4lx cmd: 0x%p " "slot %d irq %d\n", sh->base, cmd, y, sh->irq)); return 0; } static DEF_SCSI_QCMD(eata_pio_queue) static int eata_pio_abort(struct scsi_cmnd *cmd) { unsigned int loop = 100; DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, "eata_pio_abort called pid: 0x%p\n", cmd)); while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY) if (--loop == 0) { printk(KERN_WARNING "eata_pio: abort, timeout error.\n"); return FAILED; } if (CD(cmd)->status == 
FREE) { DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_NOT_RUNNING\n")); return FAILED; } if (CD(cmd)->status == USED) { DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_BUSY\n")); /* We want to sleep a bit more here */ return FAILED; /* SNOOZE */ } if (CD(cmd)->status == RESET) { printk(KERN_WARNING "eata_pio: abort, command reset error.\n"); return FAILED; } if (CD(cmd)->status == LOCKED) { DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio: abort, queue slot " "locked.\n")); return FAILED; } panic("eata_pio: abort: invalid slot status\n"); } static int eata_pio_host_reset(struct scsi_cmnd *cmd) { unsigned int x, limit = 0; unsigned char success = 0; struct scsi_cmnd *sp; struct Scsi_Host *host = cmd->device->host; DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, "eata_pio_reset called\n")); spin_lock_irq(host->host_lock); if (HD(cmd)->state == RESET) { printk(KERN_WARNING "eata_pio_reset: exit, already in reset.\n"); spin_unlock_irq(host->host_lock); return FAILED; } /* force all slots to be free */ for (x = 0; x < cmd->device->host->can_queue; x++) { if (HD(cmd)->ccb[x].status == FREE) continue; sp = HD(cmd)->ccb[x].cmd; HD(cmd)->ccb[x].status = RESET; printk(KERN_WARNING "eata_pio_reset: slot %d in reset.\n", x); if (sp == NULL) panic("eata_pio_reset: slot %d, sp==NULL.\n", x); } /* hard reset the HBA */ outb(EATA_CMD_RESET, cmd->device->host->base + HA_WCOMMAND); DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n")); HD(cmd)->state = RESET; spin_unlock_irq(host->host_lock); msleep(3000); spin_lock_irq(host->host_lock); DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: interrupts disabled, " "loops %d.\n", limit)); for (x = 0; x < cmd->device->host->can_queue; x++) { /* Skip slots already set free by interrupt */ if (HD(cmd)->ccb[x].status != RESET) continue; sp = HD(cmd)->ccb[x].cmd; sp->result = DID_RESET << 16; /* This mailbox is terminated */ printk(KERN_WARNING "eata_pio_reset: reset ccb %d.\n", x); 
HD(cmd)->ccb[x].status = FREE; sp->scsi_done(sp); } HD(cmd)->state = 0; spin_unlock_irq(host->host_lock); if (success) { /* hmmm... */ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, success.\n")); return SUCCESS; } else { DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, wakeup.\n")); return FAILED; } } static char *get_pio_board_data(unsigned long base, unsigned int irq, unsigned int id, unsigned long cplen, unsigned short cppadlen) { struct eata_ccb cp; static char buff[256]; int z; memset(&cp, 0, sizeof(struct eata_ccb)); memset(buff, 0, sizeof(buff)); cp.DataIn = 1; cp.Interpret = 1; /* Interpret command */ cp.cp_datalen = cpu_to_be32(254); cp.cp_dataDMA = cpu_to_be32(0); cp.cp_id = id; cp.cp_lun = 0; cp.cp_cdb[0] = INQUIRY; cp.cp_cdb[1] = 0; cp.cp_cdb[2] = 0; cp.cp_cdb[3] = 0; cp.cp_cdb[4] = 254; cp.cp_cdb[5] = 0; if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) return NULL; while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) cpu_relax(); outsw(base + HA_RDATA, &cp, cplen); outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND); for (z = 0; z < cppadlen; z++) outw(0, base + HA_RDATA); while (inb(base + HA_RSTATUS) & HA_SBUSY) cpu_relax(); if (inb(base + HA_RSTATUS) & HA_SERROR) return NULL; else if (!(inb(base + HA_RSTATUS) & HA_SDRQ)) return NULL; else { insw(base + HA_RDATA, &buff, 127); while (inb(base + HA_RSTATUS) & HA_SDRQ) inw(base + HA_RDATA); return buff; } } static int get_pio_conf_PIO(unsigned long base, struct get_conf *buf) { unsigned long loop = HZ / 2; int z; unsigned short *p; if (!request_region(base, 9, "eata_pio")) return 0; memset(buf, 0, sizeof(struct get_conf)); while (inb(base + HA_RSTATUS) & HA_SBUSY) if (--loop == 0) goto fail; DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#lx\n", base)); eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG); loop = 50; for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) { while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) 
if (--loop == 0) goto fail; loop = 50; *p = inw(base + HA_RDATA); } if (inb(base + HA_RSTATUS) & HA_SERROR) { DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during " "transfer for HBA at %lx\n", base)); goto fail; } if (cpu_to_be32(EATA_SIGNATURE) != buf->signature) goto fail; DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found " "at %#4lx EATA Level: %x\n", base, (unsigned int) (buf->version))); while (inb(base + HA_RSTATUS) & HA_SDRQ) inw(base + HA_RDATA); if (!ALLOW_DMA_BOARDS) { for (z = 0; z < MAXISA; z++) if (base == ISAbases[z]) { buf->IRQ = ISAirqs[z]; break; } } return 1; fail: release_region(base, 9); return 0; } static void print_pio_config(struct get_conf *gc) { printk("Please check values: (read config data)\n"); printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", be32_to_cpu(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support); printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], be16_to_cpu(gc->queuesiz), be16_to_cpu(gc->SGsiz), gc->SECOND); printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest); } static unsigned int print_selftest(unsigned int base) { unsigned char buffer[512]; #ifdef VERBOSE_SETUP int z; #endif printk("eata_pio: executing controller self test & setup...\n"); while (inb(base + HA_RSTATUS) & HA_SBUSY); outb(EATA_CMD_PIO_SETUPTEST, base + HA_WCOMMAND); do { while (inb(base + HA_RSTATUS) & HA_SBUSY) /* nothing */ ; if (inb(base + HA_RSTATUS) & HA_SDRQ) { insw(base + HA_RDATA, &buffer, 256); #ifdef VERBOSE_SETUP /* no beeps please... 
*/ for (z = 0; z < 511 && buffer[z]; z++) if (buffer[z] != 7) printk("%c", buffer[z]); #endif } } while (inb(base + HA_RSTATUS) & (HA_SBUSY | HA_SDRQ)); return (!(inb(base + HA_RSTATUS) & HA_SERROR)); } static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev) { unsigned long size = 0; char *buff; unsigned long cplen; unsigned short cppadlen; struct Scsi_Host *sh; hostdata *hd; DBG(DBG_REGISTER, print_pio_config(gc)); if (gc->DMA_support) { printk("HBA at %#.4lx supports DMA. Please use EATA-DMA driver.\n", base); if (!ALLOW_DMA_BOARDS) return 0; } if ((buff = get_pio_board_data(base, gc->IRQ, gc->scsi_id[3], cplen = (cpu_to_be32(gc->cplen) + 1) / 2, cppadlen = (cpu_to_be16(gc->cppadlen) + 1) / 2)) == NULL) { printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", base); return 0; } if (!print_selftest(base) && !ALLOW_DMA_BOARDS) { printk("HBA at %#lx failed while performing self test & setup.\n", base); return 0; } size = sizeof(hostdata) + (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz)); sh = scsi_register(&driver_template, size); if (sh == NULL) return 0; if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */ if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) { reg_IRQ[gc->IRQ]++; if (!gc->IRQ_TR) reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */ } else { printk("Couldn't allocate IRQ %d, Sorry.\n", gc->IRQ); return 0; } } else { /* More than one HBA on this IRQ */ if (reg_IRQL[gc->IRQ]) { printk("Can't support more than one HBA on this IRQ,\n" " if the IRQ is edge triggered. 
Sorry.\n"); return 0; } else reg_IRQ[gc->IRQ]++; } hd = SD(sh); memset(hd->ccb, 0, (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz))); memset(hd->reads, 0, sizeof(hd->reads)); strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor)); strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name)); SD(sh)->revision[0] = buff[32]; SD(sh)->revision[1] = buff[33]; SD(sh)->revision[2] = buff[34]; SD(sh)->revision[3] = '.'; SD(sh)->revision[4] = buff[35]; SD(sh)->revision[5] = 0; switch (be32_to_cpu(gc->len)) { case 0x1c: SD(sh)->EATA_revision = 'a'; break; case 0x1e: SD(sh)->EATA_revision = 'b'; break; case 0x22: SD(sh)->EATA_revision = 'c'; break; case 0x24: SD(sh)->EATA_revision = 'z'; default: SD(sh)->EATA_revision = '?'; } if (be32_to_cpu(gc->len) >= 0x22) { if (gc->is_PCI) hd->bustype = IS_PCI; else if (gc->is_EISA) hd->bustype = IS_EISA; else hd->bustype = IS_ISA; } else { if (buff[21] == '4') hd->bustype = IS_PCI; else if (buff[21] == '2') hd->bustype = IS_EISA; else hd->bustype = IS_ISA; } SD(sh)->cplen = cplen; SD(sh)->cppadlen = cppadlen; SD(sh)->hostid = gc->scsi_id[3]; SD(sh)->devflags = 1 << gc->scsi_id[3]; SD(sh)->moresupport = gc->MORE_support; sh->unique_id = base; sh->base = base; sh->io_port = base; sh->n_io_port = 9; sh->irq = gc->IRQ; sh->dma_channel = PIO; sh->this_id = gc->scsi_id[3]; sh->can_queue = 1; sh->cmd_per_lun = 1; sh->sg_tablesize = SG_ALL; hd->channel = 0; hd->pdev = pci_dev_get(pdev); /* Keep a PCI reference */ sh->max_id = 8; sh->max_lun = 8; if (gc->SECOND) hd->primary = 0; else hd->primary = 1; hd->next = NULL; /* build a linked list of all HBAs */ hd->prev = last_HBA; if (hd->prev != NULL) SD(hd->prev)->next = sh; last_HBA = sh; if (first_HBA == NULL) first_HBA = sh; registered_HBAs++; return (1); } static void find_pio_ISA(struct get_conf *buf) { int i; for (i = 0; i < MAXISA; i++) { if (!ISAbases[i]) continue; if (!get_pio_conf_PIO(ISAbases[i], buf)) continue; if (!register_pio_HBA(ISAbases[i], buf, NULL)) release_region(ISAbases[i], 
9); else ISAbases[i] = 0; } return; } static void find_pio_EISA(struct get_conf *buf) { u32 base; int i; #ifdef CHECKPAL u8 pal1, pal2, pal3; #endif for (i = 0; i < MAXEISA; i++) { if (EISAbases[i]) { /* Still a possibility ? */ base = 0x1c88 + (i * 0x1000); #ifdef CHECKPAL pal1 = inb((u16) base - 8); pal2 = inb((u16) base - 7); pal3 = inb((u16) base - 6); if (((pal1 == 0x12) && (pal2 == 0x14)) || ((pal1 == 0x38) && (pal2 == 0xa3) && (pal3 == 0x82)) || ((pal1 == 0x06) && (pal2 == 0x94) && (pal3 == 0x24))) { DBG(DBG_PROBE, printk(KERN_NOTICE "EISA EATA id tags found: " "%x %x %x \n", (int) pal1, (int) pal2, (int) pal3)); #endif if (get_pio_conf_PIO(base, buf)) { DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf)); if (buf->IRQ) { if (!register_pio_HBA(base, buf, NULL)) release_region(base, 9); } else { printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n"); release_region(base, 9); } } /* Nothing found here so we take it from the list */ EISAbases[i] = 0; #ifdef CHECKPAL } #endif } } return; } static void find_pio_PCI(struct get_conf *buf) { #ifndef CONFIG_PCI printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n"); #else struct pci_dev *dev = NULL; unsigned long base, x; while ((dev = pci_get_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) { DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev))); if (pci_enable_device(dev)) continue; pci_set_master(dev); base = pci_resource_flags(dev, 0); if (base & IORESOURCE_MEM) { printk("eata_pio: invalid base address of device %s\n", pci_name(dev)); continue; } base = pci_resource_start(dev, 0); /* EISA tag there ? 
*/ if ((inb(base) == 0x12) && (inb(base + 1) == 0x14)) continue; /* Jep, it's forced, so move on */ base += 0x10; /* Now, THIS is the real address */ if (base != 0x1f8) { /* We didn't find it in the primary search */ if (get_pio_conf_PIO(base, buf)) { if (buf->FORCADR) { /* If the address is forced */ release_region(base, 9); continue; /* we'll find it later */ } /* OK. We made it till here, so we can go now * and register it. We only have to check and * eventually remove it from the EISA and ISA list */ if (!register_pio_HBA(base, buf, dev)) { release_region(base, 9); continue; } if (base < 0x1000) { for (x = 0; x < MAXISA; ++x) { if (ISAbases[x] == base) { ISAbases[x] = 0; break; } } } else if ((base & 0x0fff) == 0x0c88) { x = (base >> 12) & 0x0f; EISAbases[x] = 0; } } #ifdef CHECK_BLINK else if (check_blink_state(base)) { printk("eata_pio: HBA is in BLINK state.\n" "Consult your HBAs manual to correct this.\n"); } #endif } } #endif /* #ifndef CONFIG_PCI */ } static int eata_pio_detect(struct scsi_host_template *tpnt) { struct Scsi_Host *HBA_ptr; struct get_conf gc; int i; find_pio_PCI(&gc); find_pio_EISA(&gc); find_pio_ISA(&gc); for (i = 0; i <= MAXIRQ; i++) if (reg_IRQ[i]) request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL); HBA_ptr = first_HBA; if (registered_HBAs != 0) { printk("EATA (Extended Attachment) PIO driver version: %d.%d%s\n" "(c) 1993-95 Michael Neuffer, neuffer@goofy.zdv.uni-mainz.de\n" " Alfred Arnold, a.arnold@kfa-juelich.de\n" "This release only supports DASD devices (harddisks)\n", VER_MAJOR, VER_MINOR, VER_SUB); printk("Registered HBAs:\n"); printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n"); for (i = 1; i <= registered_HBAs; i++) { printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4lx %2d %d %d %c" " %2d %2d %2d\n", HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision, SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ? "PCI " : (SD(HBA_ptr)->bustype == 'E') ? 
"EISA" : "ISA ", HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id, SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue, HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun); HBA_ptr = SD(HBA_ptr)->next; } } return (registered_HBAs); } static struct scsi_host_template driver_template = { .proc_name = "eata_pio", .name = "EATA (Extended Attachment) PIO driver", .proc_info = eata_pio_proc_info, .detect = eata_pio_detect, .release = eata_pio_release, .queuecommand = eata_pio_queue, .eh_abort_handler = eata_pio_abort, .eh_host_reset_handler = eata_pio_host_reset, .use_clustering = ENABLE_CLUSTERING, }; MODULE_AUTHOR("Michael Neuffer, Alfred Arnold"); MODULE_DESCRIPTION("EATA SCSI PIO driver"); MODULE_LICENSE("GPL"); #include "scsi_module.c"
gpl-2.0
HONO/lge-kernel-p930-Gingerbread-mod
arch/um/kernel/exitcode.c
8439
1688
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/uaccess.h>

/*
 * If read and write race, the read will still atomically read a valid
 * value.
 */
int uml_exitcode = 0;

/*
 * Show the current UML exit code via /proc/exitcode.
 * Returns 0 (seq_file single-shot show callback).
 */
static int exitcode_proc_show(struct seq_file *m, void *v)
{
	int val;

	/*
	 * Save uml_exitcode in a local so that we don't need to guarantee
	 * that sprintf accesses it atomically.
	 */
	val = uml_exitcode;
	seq_printf(m, "%d\n", val);
	return 0;
}

/* open() callback: bind the single-record show routine to the file. */
static int exitcode_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, exitcode_proc_show, NULL);
}

/*
 * Parse an integer written to /proc/exitcode and store it in
 * uml_exitcode.  Accepts an optional base prefix (base 0 parsing) and
 * allows trailing whitespace; anything else is -EINVAL.
 *
 * Returns @count on success, -EFAULT on a bad user pointer, -EINVAL on
 * unparseable input.
 */
static ssize_t exitcode_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	char *end, buf[sizeof("nnnnn\0")];
	size_t size;
	int tmp;

	/*
	 * Bound the copy to the on-stack buffer: the original code copied
	 * the full user-controlled @count into a 7-byte buffer, a kernel
	 * stack overflow (CVE-2013-4512).  Reserve one byte so buf can
	 * always be NUL-terminated before simple_strtol() walks it.
	 */
	size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, buffer, size))
		return -EFAULT;
	buf[size] = '\0';

	tmp = simple_strtol(buf, &end, 0);
	if ((*end != '\0') && !isspace(*end))
		return -EINVAL;

	uml_exitcode = tmp;
	return count;
}

static const struct file_operations exitcode_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= exitcode_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= exitcode_proc_write,
};

/*
 * Register /proc/exitcode (mode 0600).  Failure is logged but not
 * fatal; always returns 0 so the initcall never aborts boot.
 */
static int make_proc_exitcode(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
	if (ent == NULL) {
		printk(KERN_WARNING "make_proc_exitcode : Failed to register "
		       "/proc/exitcode\n");
		return 0;
	}
	return 0;
}

__initcall(make_proc_exitcode);
gpl-2.0
CyanHacker-Lollipop/kernel_lge_g3
drivers/usb/host/whci/init.c
9719
5014
/* * Wireless Host Controller (WHC) initialization. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" /* * Reset the host controller. */ static void whc_hw_reset(struct whc *whc) { le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD); whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0, 100, "reset"); } static void whc_hw_init_di_buf(struct whc *whc) { int d; /* Disable all entries in the Device Information buffer. */ for (d = 0; d < whc->n_devices; d++) whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE; le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR); } static void whc_hw_init_dn_buf(struct whc *whc) { /* Clear the Device Notification buffer to ensure the V (valid) * bits are clear. 
*/ memset(whc->dn_buf, 0, 4096); le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR); } int whc_init(struct whc *whc) { u32 whcsparams; int ret, i; resource_size_t start, len; spin_lock_init(&whc->lock); mutex_init(&whc->mutex); init_waitqueue_head(&whc->cmd_wq); init_waitqueue_head(&whc->async_list_wq); init_waitqueue_head(&whc->periodic_list_wq); whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev)); if (whc->workqueue == NULL) { ret = -ENOMEM; goto error; } INIT_WORK(&whc->dn_work, whc_dn_work); INIT_WORK(&whc->async_work, scan_async_work); INIT_LIST_HEAD(&whc->async_list); INIT_LIST_HEAD(&whc->async_removed_list); INIT_WORK(&whc->periodic_work, scan_periodic_work); for (i = 0; i < 5; i++) INIT_LIST_HEAD(&whc->periodic_list[i]); INIT_LIST_HEAD(&whc->periodic_removed_list); /* Map HC registers. */ start = whc->umc->resource.start; len = whc->umc->resource.end - start + 1; if (!request_mem_region(start, len, "whci-hc")) { dev_err(&whc->umc->dev, "can't request HC region\n"); ret = -EBUSY; goto error; } whc->base_phys = start; whc->base = ioremap(start, len); if (!whc->base) { dev_err(&whc->umc->dev, "ioremap\n"); ret = -ENOMEM; goto error; } whc_hw_reset(whc); /* Read maximum number of devices, keys and MMC IEs. */ whcsparams = le_readl(whc->base + WHCSPARAMS); whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams); whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams); whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams); dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n", whc->n_devices, whc->n_keys, whc->n_mmc_ies); whc->qset_pool = dma_pool_create("qset", &whc->umc->dev, sizeof(struct whc_qset), 64, 0); if (whc->qset_pool == NULL) { ret = -ENOMEM; goto error; } ret = asl_init(whc); if (ret < 0) goto error; ret = pzl_init(whc); if (ret < 0) goto error; /* Allocate and initialize a buffer for generic commands, the Device Information buffer, and the Device Notification buffer. 
*/ whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, &whc->gen_cmd_buf_dma, GFP_KERNEL); if (whc->gen_cmd_buf == NULL) { ret = -ENOMEM; goto error; } whc->dn_buf = dma_alloc_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, &whc->dn_buf_dma, GFP_KERNEL); if (!whc->dn_buf) { ret = -ENOMEM; goto error; } whc_hw_init_dn_buf(whc); whc->di_buf = dma_alloc_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, &whc->di_buf_dma, GFP_KERNEL); if (!whc->di_buf) { ret = -ENOMEM; goto error; } whc_hw_init_di_buf(whc); return 0; error: whc_clean_up(whc); return ret; } void whc_clean_up(struct whc *whc) { resource_size_t len; if (whc->di_buf) dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, whc->di_buf, whc->di_buf_dma); if (whc->dn_buf) dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, whc->dn_buf, whc->dn_buf_dma); if (whc->gen_cmd_buf) dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, whc->gen_cmd_buf, whc->gen_cmd_buf_dma); pzl_clean_up(whc); asl_clean_up(whc); if (whc->qset_pool) dma_pool_destroy(whc->qset_pool); len = resource_size(&whc->umc->resource); if (whc->base) iounmap(whc->base); if (whc->base_phys) release_mem_region(whc->base_phys, len); if (whc->workqueue) destroy_workqueue(whc->workqueue); }
gpl-2.0
Ninpo/ninphetamine3
drivers/scsi/fnic/vnic_cq.c
15095
2760
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/*
 * Free the completion queue's descriptor ring and drop the cached
 * pointer to its control registers.  Caller owns the struct vnic_cq
 * itself.
 */
void vnic_cq_free(struct vnic_cq *cq)
{
	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);

	cq->ctrl = NULL;
}

/*
 * Allocate a completion queue: look up the CQ's control register
 * resource by @index and allocate a descriptor ring of
 * @desc_count x @desc_size bytes.
 *
 * Returns 0 on success, -EINVAL if the CQ resource is missing, or the
 * error from the ring allocation.  On ring-allocation failure cq->ctrl
 * is left set; pair with vnic_cq_free() for teardown.
 */
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	cq->index = index;
	cq->vdev = vdev;

	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
	if (!cq->ctrl) {
		printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
		return -EINVAL;
	}

	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
	if (err)
		return err;

	return 0;
}

/*
 * Program the CQ's hardware control registers from the given
 * configuration.  The ring base address is tagged with
 * VNIC_PADDR_TARGET before being written, and is written first so the
 * controller knows where the ring lives before the remaining knobs
 * (flow control, color, head/tail, interrupt routing, message mode)
 * are set.  64-bit registers use writeq, 32-bit ones iowrite32.
 */
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int cq_message_enable,
	unsigned int interrupt_offset, u64 cq_message_addr)
{
	u64 paddr;

	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &cq->ctrl->ring_base);
	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
	iowrite32(color_enable, &cq->ctrl->color_enable);
	iowrite32(cq_head, &cq->ctrl->cq_head);
	iowrite32(cq_tail, &cq->ctrl->cq_tail);
	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}

/*
 * Reset the CQ to its empty post-init state: clear the driver's
 * software cursor (to_clean, last_color), rewind the hardware head and
 * tail to 0, set the tail color back to 1, and zero the descriptor
 * ring contents.
 */
void vnic_cq_clean(struct vnic_cq *cq)
{
	cq->to_clean = 0;
	cq->last_color = 0;

	iowrite32(0, &cq->ctrl->cq_head);
	iowrite32(0, &cq->ctrl->cq_tail);
	iowrite32(1, &cq->ctrl->cq_tail_color);

	vnic_dev_clear_desc_ring(&cq->ring);
}
gpl-2.0
GustavoRD78/78Kernel-Xperia-Mini
drivers/pcmcia/pcmcia_ioctl.c
504
26255
/* * pcmcia_ioctl.c -- ioctl interface for cardmgr and cardctl * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds * (C) 2003 - 2004 Dominik Brodowski */ /* * This file will go away soon. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/ioctl.h> #include <linux/proc_fs.h> #include <linux/poll.h> #include <linux/pci.h> #include <linux/seq_file.h> #include <linux/smp_lock.h> #include <linux/workqueue.h> #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <pcmcia/ss.h> #include "cs_internal.h" static int major_dev = -1; /* Device user information */ #define MAX_EVENTS 32 #define USER_MAGIC 0x7ea4 #define CHECK_USER(u) \ (((u) == NULL) || ((u)->user_magic != USER_MAGIC)) typedef struct user_info_t { u_int user_magic; int event_head, event_tail; event_t event[MAX_EVENTS]; struct user_info_t *next; struct pcmcia_socket *socket; } user_info_t; #ifdef CONFIG_PCMCIA_DEBUG extern int ds_pc_debug; #define ds_dbg(lvl, fmt, arg...) do { \ if (ds_pc_debug >= lvl) \ printk(KERN_DEBUG "ds: " fmt , ## arg); \ } while (0) #else #define ds_dbg(lvl, fmt, arg...) 
do { } while (0) #endif static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s, unsigned int function) { struct pcmcia_device *p_dev = NULL; unsigned long flags; spin_lock_irqsave(&pcmcia_dev_list_lock, flags); list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { if (p_dev->func == function) { spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); return pcmcia_get_dev(p_dev); } } spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); return NULL; } /* backwards-compatible accessing of driver --- by name! */ static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info) { struct device_driver *drv; struct pcmcia_driver *p_drv; drv = driver_find((char *) dev_info, &pcmcia_bus_type); if (!drv) return NULL; p_drv = container_of(drv, struct pcmcia_driver, drv); return (p_drv); } #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_pccard = NULL; static int proc_read_drivers_callback(struct device_driver *driver, void *_m) { struct seq_file *m = _m; struct pcmcia_driver *p_drv = container_of(driver, struct pcmcia_driver, drv); seq_printf(m, "%-24.24s 1 %d\n", p_drv->drv.name, #ifdef CONFIG_MODULE_UNLOAD (p_drv->owner) ? 
module_refcount(p_drv->owner) : 1 #else 1 #endif ); return 0; } static int pccard_drivers_proc_show(struct seq_file *m, void *v) { return bus_for_each_drv(&pcmcia_bus_type, NULL, m, proc_read_drivers_callback); } static int pccard_drivers_proc_open(struct inode *inode, struct file *file) { return single_open(file, pccard_drivers_proc_show, NULL); } static const struct file_operations pccard_drivers_proc_fops = { .owner = THIS_MODULE, .open = pccard_drivers_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif #ifdef CONFIG_PCMCIA_PROBE static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) { int irq; u32 mask; irq = adj->resource.irq.IRQ; if ((irq < 0) || (irq > 15)) return -EINVAL; if (adj->Action != REMOVE_MANAGED_RESOURCE) return 0; mask = 1 << irq; if (!(s->irq_mask & mask)) return 0; s->irq_mask &= ~mask; return 0; } #else static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) { return 0; } #endif static int pcmcia_adjust_resource_info(adjust_t *adj) { struct pcmcia_socket *s; int ret = -ENOSYS; unsigned long flags; down_read(&pcmcia_socket_list_rwsem); list_for_each_entry(s, &pcmcia_socket_list, socket_list) { if (adj->Resource == RES_IRQ) ret = adjust_irq(s, adj); else if (s->resource_ops->add_io) { unsigned long begin, end; /* you can't use the old interface if the new * one was used before */ spin_lock_irqsave(&s->lock, flags); if ((s->resource_setup_new) && !(s->resource_setup_old)) { spin_unlock_irqrestore(&s->lock, flags); continue; } else if (!(s->resource_setup_old)) s->resource_setup_old = 1; spin_unlock_irqrestore(&s->lock, flags); switch (adj->Resource) { case RES_MEMORY_RANGE: begin = adj->resource.memory.Base; end = adj->resource.memory.Base + adj->resource.memory.Size - 1; if (s->resource_ops->add_mem) ret =s->resource_ops->add_mem(s, adj->Action, begin, end); case RES_IO_RANGE: begin = adj->resource.io.BasePort; end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1; if 
(s->resource_ops->add_io) ret = s->resource_ops->add_io(s, adj->Action, begin, end); } if (!ret) { /* as there's no way we know this is the * last call to adjust_resource_info, we * always need to assume this is the latest * one... */ spin_lock_irqsave(&s->lock, flags); s->resource_setup_done = 1; spin_unlock_irqrestore(&s->lock, flags); } } } up_read(&pcmcia_socket_list_rwsem); return (ret); } /** pccard_get_status * * Get the current socket state bits. We don't support the latched * SocketState yet: I haven't seen any point for it. */ static int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_status_t *status) { config_t *c; int val; s->ops->get_status(s, &val); status->CardState = status->SocketState = 0; status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0; status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0; status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0; status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0; if (s->state & SOCKET_SUSPEND) status->CardState |= CS_EVENT_PM_SUSPEND; if (!(s->state & SOCKET_PRESENT)) return -ENODEV; c = (p_dev) ? p_dev->function_config : NULL; if ((c != NULL) && (c->state & CONFIG_LOCKED) && (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) { u_char reg; if (c->CardValues & PRESENT_PIN_REPLACE) { pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg); status->CardState |= (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0; status->CardState |= (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0; status->CardState |= (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0; status->CardState |= (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0; } else { /* No PRR? Then assume we're always ready */ status->CardState |= CS_EVENT_READY_CHANGE; } if (c->CardValues & PRESENT_EXT_STATUS) { pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg); status->CardState |= (reg & ESR_REQ_ATTN) ? 
CS_EVENT_REQUEST_ATTENTION : 0; } return 0; } status->CardState |= (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0; status->CardState |= (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0; status->CardState |= (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0; status->CardState |= (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0; return 0; } /* pccard_get_status */ static int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config) { config_t *c; if (!(s->state & SOCKET_PRESENT)) return -ENODEV; #ifdef CONFIG_CARDBUS if (s->state & SOCKET_CARDBUS) { memset(config, 0, sizeof(config_info_t)); config->Vcc = s->socket.Vcc; config->Vpp1 = config->Vpp2 = s->socket.Vpp; config->Option = s->cb_dev->subordinate->number; if (s->state & SOCKET_CARDBUS_CONFIG) { config->Attributes = CONF_VALID_CLIENT; config->IntType = INT_CARDBUS; config->AssignedIRQ = s->irq.AssignedIRQ; if (config->AssignedIRQ) config->Attributes |= CONF_ENABLE_IRQ; if (s->io[0].res) { config->BasePort1 = s->io[0].res->start; config->NumPorts1 = s->io[0].res->end - config->BasePort1 + 1; } } return 0; } #endif if (p_dev) { c = p_dev->function_config; config->Function = p_dev->func; } else { c = NULL; config->Function = 0; } if ((c == NULL) || !(c->state & CONFIG_LOCKED)) { config->Attributes = 0; config->Vcc = s->socket.Vcc; config->Vpp1 = config->Vpp2 = s->socket.Vpp; return 0; } config->Attributes = c->Attributes | CONF_VALID_CLIENT; config->Vcc = s->socket.Vcc; config->Vpp1 = config->Vpp2 = s->socket.Vpp; config->IntType = c->IntType; config->ConfigBase = c->ConfigBase; config->Status = c->Status; config->Pin = c->Pin; config->Copy = c->Copy; config->Option = c->Option; config->ExtStatus = c->ExtStatus; config->Present = config->CardValues = c->CardValues; config->IRQAttributes = c->irq.Attributes; config->AssignedIRQ = s->irq.AssignedIRQ; config->BasePort1 = c->io.BasePort1; config->NumPorts1 = c->io.NumPorts1; config->Attributes1 = c->io.Attributes1; 
config->BasePort2 = c->io.BasePort2; config->NumPorts2 = c->io.NumPorts2; config->Attributes2 = c->io.Attributes2; config->IOAddrLines = c->io.IOAddrLines; return 0; } /* pccard_get_configuration_info */ /*====================================================================== These manage a ring buffer of events pending for one user process ======================================================================*/ static int queue_empty(user_info_t *user) { return (user->event_head == user->event_tail); } static event_t get_queued_event(user_info_t *user) { user->event_tail = (user->event_tail+1) % MAX_EVENTS; return user->event[user->event_tail]; } static void queue_event(user_info_t *user, event_t event) { user->event_head = (user->event_head+1) % MAX_EVENTS; if (user->event_head == user->event_tail) user->event_tail = (user->event_tail+1) % MAX_EVENTS; user->event[user->event_head] = event; } void handle_event(struct pcmcia_socket *s, event_t event) { user_info_t *user; for (user = s->user; user; user = user->next) queue_event(user, event); wake_up_interruptible(&s->queue); } /*====================================================================== bind_request() and bind_device() are merged by now. Register_client() is called right at the end of bind_request(), during the driver's ->attach() call. Individual descriptions: bind_request() connects a socket to a particular client driver. It looks up the specified device ID in the list of registered drivers, binds it to the socket, and tries to create an instance of the device. unbind_request() deletes a driver instance. Bind_device() associates a device driver with a particular socket. It is normally called by Driver Services after it has identified a newly inserted card. An instance of that driver will then be eligible to register as a client of this socket. Register_client() uses the dev_info_t handle to match the caller with a socket. 
The driver must have already been bound to a socket with bind_device() -- in fact, bind_device() allocates the client structure that will be used. ======================================================================*/ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info) { struct pcmcia_driver *p_drv; struct pcmcia_device *p_dev; int ret = 0; unsigned long flags; s = pcmcia_get_socket(s); if (!s) return -EINVAL; ds_dbg(2, "bind_request(%d, '%s')\n", s->sock, (char *)bind_info->dev_info); p_drv = get_pcmcia_driver(&bind_info->dev_info); if (!p_drv) { ret = -EINVAL; goto err_put; } if (!try_module_get(p_drv->owner)) { ret = -EINVAL; goto err_put_driver; } spin_lock_irqsave(&pcmcia_dev_list_lock, flags); list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { if (p_dev->func == bind_info->function) { if ((p_dev->dev.driver == &p_drv->drv)) { if (p_dev->cardmgr) { /* if there's already a device * registered, and it was registered * by userspace before, we need to * return the "instance". */ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); bind_info->instance = p_dev; ret = -EBUSY; goto err_put_module; } else { /* the correct driver managed to bind * itself magically to the correct * device. */ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); p_dev->cardmgr = p_drv; ret = 0; goto err_put_module; } } else if (!p_dev->dev.driver) { /* there's already a device available where * no device has been bound to yet. So we don't * need to register a device! */ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); goto rescan; } } } spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); p_dev = pcmcia_device_add(s, bind_info->function); if (!p_dev) { ret = -EIO; goto err_put_module; } rescan: p_dev->cardmgr = p_drv; /* if a driver is already running, we can abort */ if (p_dev->dev.driver) goto err_put_module; /* * Prevent this racing with a card insertion. 
*/ mutex_lock(&s->skt_mutex); ret = bus_rescan_devices(&pcmcia_bus_type); mutex_unlock(&s->skt_mutex); if (ret) goto err_put_module; /* check whether the driver indeed matched. I don't care if this * is racy or not, because it can only happen on cardmgr access * paths... */ if (!(p_dev->dev.driver == &p_drv->drv)) p_dev->cardmgr = NULL; err_put_module: module_put(p_drv->owner); err_put_driver: put_driver(&p_drv->drv); err_put: pcmcia_put_socket(s); return (ret); } /* bind_request */ #ifdef CONFIG_CARDBUS static struct pci_bus *pcmcia_lookup_bus(struct pcmcia_socket *s) { if (!s || !(s->state & SOCKET_CARDBUS)) return NULL; return s->cb_dev->subordinate; } #endif static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int first) { dev_node_t *node; struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; unsigned long flags; int ret = 0; #ifdef CONFIG_CARDBUS /* * Some unbelievably ugly code to associate the PCI cardbus * device and its driver with the PCMCIA "bind" information. */ { struct pci_bus *bus; bus = pcmcia_lookup_bus(s); if (bus) { struct list_head *list; struct pci_dev *dev = NULL; list = bus->devices.next; while (list != &bus->devices) { struct pci_dev *pdev = pci_dev_b(list); list = list->next; if (first) { dev = pdev; break; } /* Try to handle "next" here some way? 
*/ } if (dev && dev->driver) { strlcpy(bind_info->name, dev->driver->name, DEV_NAME_LEN); bind_info->major = 0; bind_info->minor = 0; bind_info->next = NULL; return 0; } } } #endif spin_lock_irqsave(&pcmcia_dev_list_lock, flags); list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { if (p_dev->func == bind_info->function) { p_dev = pcmcia_get_dev(p_dev); if (!p_dev) continue; goto found; } } spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); return -ENODEV; found: spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); p_drv = to_pcmcia_drv(p_dev->dev.driver); if (p_drv && !p_dev->_locked) { ret = -EAGAIN; goto err_put; } if (first) node = p_dev->dev_node; else for (node = p_dev->dev_node; node; node = node->next) if (node == bind_info->next) break; if (!node) { ret = -ENODEV; goto err_put; } strlcpy(bind_info->name, node->dev_name, DEV_NAME_LEN); bind_info->major = node->major; bind_info->minor = node->minor; bind_info->next = node->next; err_put: pcmcia_put_dev(p_dev); return (ret); } /* get_device_info */ static int ds_open(struct inode *inode, struct file *file) { socket_t i = iminor(inode); struct pcmcia_socket *s; user_info_t *user; static int warning_printed = 0; int ret = 0; ds_dbg(0, "ds_open(socket %d)\n", i); lock_kernel(); s = pcmcia_get_socket_by_nr(i); if (!s) { ret = -ENODEV; goto out; } s = pcmcia_get_socket(s); if (!s) { ret = -ENODEV; goto out; } if ((file->f_flags & O_ACCMODE) != O_RDONLY) { if (s->pcmcia_state.busy) { pcmcia_put_socket(s); ret = -EBUSY; goto out; } else s->pcmcia_state.busy = 1; } user = kmalloc(sizeof(user_info_t), GFP_KERNEL); if (!user) { pcmcia_put_socket(s); ret = -ENOMEM; goto out; } user->event_tail = user->event_head = 0; user->next = s->user; user->user_magic = USER_MAGIC; user->socket = s; s->user = user; file->private_data = user; if (!warning_printed) { printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl " "usage from process: %s.\n", current->comm); printk(KERN_INFO "pcmcia: This interface will 
soon be removed from " "the kernel; please expect breakage unless you upgrade " "to new tools.\n"); printk(KERN_INFO "pcmcia: see http://www.kernel.org/pub/linux/" "utils/kernel/pcmcia/pcmcia.html for details.\n"); warning_printed = 1; } if (s->pcmcia_state.present) queue_event(user, CS_EVENT_CARD_INSERTION); out: unlock_kernel(); return ret; } /* ds_open */ /*====================================================================*/ static int ds_release(struct inode *inode, struct file *file) { struct pcmcia_socket *s; user_info_t *user, **link; ds_dbg(0, "ds_release(socket %d)\n", iminor(inode)); user = file->private_data; if (CHECK_USER(user)) goto out; s = user->socket; /* Unlink user data structure */ if ((file->f_flags & O_ACCMODE) != O_RDONLY) { s->pcmcia_state.busy = 0; } file->private_data = NULL; for (link = &s->user; *link; link = &(*link)->next) if (*link == user) break; if (link == NULL) goto out; *link = user->next; user->user_magic = 0; kfree(user); pcmcia_put_socket(s); out: return 0; } /* ds_release */ /*====================================================================*/ static ssize_t ds_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct pcmcia_socket *s; user_info_t *user; int ret; ds_dbg(2, "ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode)); if (count < 4) return -EINVAL; user = file->private_data; if (CHECK_USER(user)) return -EIO; s = user->socket; if (s->pcmcia_state.dead) return -EIO; ret = wait_event_interruptible(s->queue, !queue_empty(user)); if (ret == 0) ret = put_user(get_queued_event(user), (int __user *)buf) ? 
-EFAULT : 4; return ret; } /* ds_read */ /*====================================================================*/ static ssize_t ds_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { ds_dbg(2, "ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode)); if (count != 4) return -EINVAL; if ((file->f_flags & O_ACCMODE) == O_RDONLY) return -EBADF; return -EIO; } /* ds_write */ /*====================================================================*/ /* No kernel lock - fine */ static u_int ds_poll(struct file *file, poll_table *wait) { struct pcmcia_socket *s; user_info_t *user; ds_dbg(2, "ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode)); user = file->private_data; if (CHECK_USER(user)) return POLLERR; s = user->socket; /* * We don't check for a dead socket here since that * will send cardmgr into an endless spin. */ poll_wait(file, &s->queue, wait); if (!queue_empty(user)) return POLLIN | POLLRDNORM; return 0; } /* ds_poll */ /*====================================================================*/ static int ds_ioctl(struct inode * inode, struct file * file, u_int cmd, u_long arg) { struct pcmcia_socket *s; void __user *uarg = (char __user *)arg; u_int size; int ret, err; ds_ioctl_arg_t *buf; user_info_t *user; ds_dbg(2, "ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg); user = file->private_data; if (CHECK_USER(user)) return -EIO; s = user->socket; if (s->pcmcia_state.dead) return -EIO; size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL; /* Permission check */ if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (cmd & IOC_IN) { if (!access_ok(VERIFY_READ, uarg, size)) { ds_dbg(3, "ds_ioctl(): verify_read = %d\n", -EFAULT); return -EFAULT; } } if (cmd & IOC_OUT) { if (!access_ok(VERIFY_WRITE, uarg, size)) { ds_dbg(3, "ds_ioctl(): verify_write = %d\n", -EFAULT); return -EFAULT; } } buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); if (!buf) return -ENOMEM; 
err = ret = 0; if (cmd & IOC_IN) { if (__copy_from_user((char *)buf, uarg, size)) { err = -EFAULT; goto free_out; } } switch (cmd) { case DS_ADJUST_RESOURCE_INFO: ret = pcmcia_adjust_resource_info(&buf->adjust); break; case DS_GET_CONFIGURATION_INFO: if (buf->config.Function && (buf->config.Function >= s->functions)) ret = -EINVAL; else { struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); ret = pccard_get_configuration_info(s, p_dev, &buf->config); pcmcia_put_dev(p_dev); } break; case DS_GET_FIRST_TUPLE: mutex_lock(&s->skt_mutex); pcmcia_validate_mem(s); mutex_unlock(&s->skt_mutex); ret = pccard_get_first_tuple(s, BIND_FN_ALL, &buf->tuple); break; case DS_GET_NEXT_TUPLE: ret = pccard_get_next_tuple(s, BIND_FN_ALL, &buf->tuple); break; case DS_GET_TUPLE_DATA: buf->tuple.TupleData = buf->tuple_parse.data; buf->tuple.TupleDataMax = sizeof(buf->tuple_parse.data); ret = pccard_get_tuple_data(s, &buf->tuple); break; case DS_PARSE_TUPLE: buf->tuple.TupleData = buf->tuple_parse.data; ret = pcmcia_parse_tuple(&buf->tuple, &buf->tuple_parse.parse); break; case DS_RESET_CARD: ret = pcmcia_reset_card(s); break; case DS_GET_STATUS: if (buf->status.Function && (buf->status.Function >= s->functions)) ret = -EINVAL; else { struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); ret = pccard_get_status(s, p_dev, &buf->status); pcmcia_put_dev(p_dev); } break; case DS_VALIDATE_CIS: mutex_lock(&s->skt_mutex); pcmcia_validate_mem(s); mutex_unlock(&s->skt_mutex); ret = pccard_validate_cis(s, &buf->cisinfo.Chains); break; case DS_SUSPEND_CARD: ret = pcmcia_suspend_card(s); break; case DS_RESUME_CARD: ret = pcmcia_resume_card(s); break; case DS_EJECT_CARD: err = pcmcia_eject_card(s); break; case DS_INSERT_CARD: err = pcmcia_insert_card(s); break; case DS_ACCESS_CONFIGURATION_REGISTER: if ((buf->conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN)) { err = -EPERM; goto free_out; } ret = -EINVAL; if (!(buf->conf_reg.Function && 
(buf->conf_reg.Function >= s->functions))) { struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function); if (p_dev) { ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg); pcmcia_put_dev(p_dev); } } break; case DS_GET_FIRST_REGION: case DS_GET_NEXT_REGION: case DS_BIND_MTD: if (!capable(CAP_SYS_ADMIN)) { err = -EPERM; goto free_out; } else { printk_once(KERN_WARNING "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n"); printk_once(KERN_WARNING "MTD handling any more.\n"); } err = -EINVAL; goto free_out; break; case DS_GET_FIRST_WINDOW: ret = pcmcia_get_window(s, &buf->win_info.handle, 0, &buf->win_info.window); break; case DS_GET_NEXT_WINDOW: ret = pcmcia_get_window(s, &buf->win_info.handle, buf->win_info.handle->index + 1, &buf->win_info.window); break; case DS_GET_MEM_PAGE: ret = pcmcia_get_mem_page(buf->win_info.handle, &buf->win_info.map); break; case DS_REPLACE_CIS: ret = pcmcia_replace_cis(s, buf->cisdump.Data, buf->cisdump.Length); break; case DS_BIND_REQUEST: if (!capable(CAP_SYS_ADMIN)) { err = -EPERM; goto free_out; } err = bind_request(s, &buf->bind_info); break; case DS_GET_DEVICE_INFO: err = get_device_info(s, &buf->bind_info, 1); break; case DS_GET_NEXT_DEVICE: err = get_device_info(s, &buf->bind_info, 0); break; case DS_UNBIND_REQUEST: err = 0; break; default: err = -EINVAL; } if ((err == 0) && (ret != 0)) { ds_dbg(2, "ds_ioctl: ret = %d\n", ret); switch (ret) { case -ENODEV: case -EINVAL: case -EBUSY: case -ENOSYS: err = ret; break; case -ENOMEM: err = -ENOSPC; break; case -ENOSPC: err = -ENODATA; break; default: err = -EIO; break; } } if (cmd & IOC_OUT) { if (__copy_to_user(uarg, (char *)buf, size)) err = -EFAULT; } free_out: kfree(buf); return err; } /* ds_ioctl */ /*====================================================================*/ static const struct file_operations ds_fops = { .owner = THIS_MODULE, .open = ds_open, .release = ds_release, .ioctl = ds_ioctl, .read = ds_read, .write = 
ds_write, .poll = ds_poll, }; void __init pcmcia_setup_ioctl(void) { int i; /* Set up character device for user mode clients */ i = register_chrdev(0, "pcmcia", &ds_fops); if (i < 0) printk(KERN_NOTICE "unable to find a free device # for " "Driver Services (error=%d)\n", i); else major_dev = i; #ifdef CONFIG_PROC_FS proc_pccard = proc_mkdir("bus/pccard", NULL); if (proc_pccard) proc_create("drivers", 0, proc_pccard, &pccard_drivers_proc_fops); #endif } void __exit pcmcia_cleanup_ioctl(void) { #ifdef CONFIG_PROC_FS if (proc_pccard) { remove_proc_entry("drivers", proc_pccard); remove_proc_entry("bus/pccard", NULL); } #endif if (major_dev != -1) unregister_chrdev(major_dev, "pcmcia"); }
gpl-2.0
ac100-ru/old_ac100_kernel
security/tomoyo/tomoyo.c
504
7900
/* * security/tomoyo/tomoyo.c * * LSM hooks for TOMOYO Linux. * * Copyright (C) 2005-2009 NTT DATA CORPORATION * * Version: 2.2.0 2009/04/01 * */ #include <linux/security.h> #include "common.h" #include "tomoyo.h" #include "realpath.h" static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp) { new->security = NULL; return 0; } static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { /* * Since "struct tomoyo_domain_info *" is a sharable pointer, * we don't need to duplicate. */ new->security = old->security; return 0; } static void tomoyo_cred_transfer(struct cred *new, const struct cred *old) { /* * Since "struct tomoyo_domain_info *" is a sharable pointer, * we don't need to duplicate. */ new->security = old->security; } static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) { int rc; rc = cap_bprm_set_creds(bprm); if (rc) return rc; /* * Do only if this function is called for the first time of an execve * operation. */ if (bprm->cred_prepared) return 0; /* * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested * for the first time. */ if (!tomoyo_policy_loaded) tomoyo_load_policy(bprm->filename); /* * Tell tomoyo_bprm_check_security() is called for the first time of an * execve operation. */ bprm->cred->security = NULL; return 0; } static int tomoyo_bprm_check_security(struct linux_binprm *bprm) { struct tomoyo_domain_info *domain = bprm->cred->security; /* * Execute permission is checked against pathname passed to do_execve() * using current domain. */ if (!domain) return tomoyo_find_next_domain(bprm); /* * Read permission is checked against interpreters using next domain. * '1' is the result of open_to_namei_flags(O_RDONLY). 
*/ return tomoyo_check_open_permission(domain, &bprm->file->f_path, 1); } #ifdef CONFIG_SYSCTL static int tomoyo_prepend(char **buffer, int *buflen, const char *str) { int namelen = strlen(str); if (*buflen < namelen) return -ENOMEM; *buflen -= namelen; *buffer -= namelen; memcpy(*buffer, str, namelen); return 0; } /** * tomoyo_sysctl_path - return the realpath of a ctl_table. * @table: pointer to "struct ctl_table". * * Returns realpath(3) of the @table on success. * Returns NULL on failure. * * This function uses tomoyo_alloc(), so the caller must call tomoyo_free() * if this function didn't return NULL. */ static char *tomoyo_sysctl_path(struct ctl_table *table) { int buflen = TOMOYO_MAX_PATHNAME_LEN; char *buf = tomoyo_alloc(buflen); char *end = buf + buflen; int error = -ENOMEM; if (!buf) return NULL; *--end = '\0'; buflen--; while (table) { char num[32]; const char *sp = table->procname; if (!sp) { memset(num, 0, sizeof(num)); snprintf(num, sizeof(num) - 1, "=%d=", table->ctl_name); sp = num; } if (tomoyo_prepend(&end, &buflen, sp) || tomoyo_prepend(&end, &buflen, "/")) goto out; table = table->parent; } if (tomoyo_prepend(&end, &buflen, "/proc/sys")) goto out; error = tomoyo_encode(buf, end - buf, end); out: if (!error) return buf; tomoyo_free(buf); return NULL; } static int tomoyo_sysctl(struct ctl_table *table, int op) { int error; char *name; op &= MAY_READ | MAY_WRITE; if (!op) return 0; name = tomoyo_sysctl_path(table); if (!name) return -ENOMEM; error = tomoyo_check_file_perm(tomoyo_domain(), name, op); tomoyo_free(name); return error; } #endif static int tomoyo_path_truncate(struct path *path, loff_t length, unsigned int time_attrs) { return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_TRUNCATE_ACL, path); } static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_UNLINK_ACL, &path); } static int tomoyo_path_mkdir(struct 
path *parent, struct dentry *dentry, int mode) { struct path path = { parent->mnt, dentry }; return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_MKDIR_ACL, &path); } static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_RMDIR_ACL, &path); } static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, const char *old_name) { struct path path = { parent->mnt, dentry }; return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_SYMLINK_ACL, &path); } static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, int mode, unsigned int dev) { struct path path = { parent->mnt, dentry }; int type = TOMOYO_TYPE_CREATE_ACL; switch (mode & S_IFMT) { case S_IFCHR: type = TOMOYO_TYPE_MKCHAR_ACL; break; case S_IFBLK: type = TOMOYO_TYPE_MKBLOCK_ACL; break; case S_IFIFO: type = TOMOYO_TYPE_MKFIFO_ACL; break; case S_IFSOCK: type = TOMOYO_TYPE_MKSOCK_ACL; break; } return tomoyo_check_1path_perm(tomoyo_domain(), type, &path); } static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { struct path path1 = { new_dir->mnt, old_dentry }; struct path path2 = { new_dir->mnt, new_dentry }; return tomoyo_check_2path_perm(tomoyo_domain(), TOMOYO_TYPE_LINK_ACL, &path1, &path2); } static int tomoyo_path_rename(struct path *old_parent, struct dentry *old_dentry, struct path *new_parent, struct dentry *new_dentry) { struct path path1 = { old_parent->mnt, old_dentry }; struct path path2 = { new_parent->mnt, new_dentry }; return tomoyo_check_2path_perm(tomoyo_domain(), TOMOYO_TYPE_RENAME_ACL, &path1, &path2); } static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)) return tomoyo_check_rewrite_permission(tomoyo_domain(), file); return 0; } static int tomoyo_dentry_open(struct file *f, const struct cred 
*cred) { int flags = f->f_flags; if ((flags + 1) & O_ACCMODE) flags++; flags |= f->f_flags & (O_APPEND | O_TRUNC); /* Don't check read permission here if called from do_execve(). */ if (current->in_execve) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags); } /* * tomoyo_security_ops is a "struct security_operations" which is used for * registering TOMOYO. */ static struct security_operations tomoyo_security_ops = { .name = "tomoyo", .cred_alloc_blank = tomoyo_cred_alloc_blank, .cred_prepare = tomoyo_cred_prepare, .cred_transfer = tomoyo_cred_transfer, .bprm_set_creds = tomoyo_bprm_set_creds, .bprm_check_security = tomoyo_bprm_check_security, #ifdef CONFIG_SYSCTL .sysctl = tomoyo_sysctl, #endif .file_fcntl = tomoyo_file_fcntl, .dentry_open = tomoyo_dentry_open, .path_truncate = tomoyo_path_truncate, .path_unlink = tomoyo_path_unlink, .path_mkdir = tomoyo_path_mkdir, .path_rmdir = tomoyo_path_rmdir, .path_symlink = tomoyo_path_symlink, .path_mknod = tomoyo_path_mknod, .path_link = tomoyo_path_link, .path_rename = tomoyo_path_rename, }; static int __init tomoyo_init(void) { struct cred *cred = (struct cred *) current_cred(); if (!security_module_enable(&tomoyo_security_ops)) return 0; /* register ourselves with the security framework */ if (register_security(&tomoyo_security_ops)) panic("Failure registering TOMOYO Linux"); printk(KERN_INFO "TOMOYO Linux initialized\n"); cred->security = &tomoyo_kernel_domain; tomoyo_realpath_init(); return 0; } security_initcall(tomoyo_init);
gpl-2.0
Framework43/touchpad-kernel
drivers/media/video/em28xx/em28xx-video.c
760
63607
/* em28xx-video.c - driver for Empia EM2800/EM2820/2840 USB video capture devices Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> Markus Rechberger <mrechberger@gmail.com> Mauro Carvalho Chehab <mchehab@infradead.org> Sascha Sommer <saschasommer@freenet.de> Some parts based on SN9C10x PC Camera Controllers GPL driver made by Luca Risolia <luca.risolia@studio.unibo.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/version.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/slab.h> #include "em28xx.h" #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include <media/msp3400.h> #include <media/tuner.h> #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ "Markus Rechberger <mrechberger@gmail.com>, " \ "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ "Sascha Sommer <saschasommer@freenet.de>" #define DRIVER_DESC "Empia em28xx based USB video device driver" #define EM28XX_VERSION_CODE KERNEL_VERSION(0, 1, 2) #define em28xx_videodbg(fmt, arg...) 
do {\ if (video_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static unsigned int isoc_debug; module_param(isoc_debug, int, 0644); MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]"); #define em28xx_isocdbg(fmt, arg...) \ do {\ if (isoc_debug) { \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); \ } \ } while (0) MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "video device numbers"); MODULE_PARM_DESC(vbi_nr, "vbi device numbers"); MODULE_PARM_DESC(radio_nr, "radio device numbers"); static unsigned int video_debug; module_param(video_debug, int, 0644); MODULE_PARM_DESC(video_debug, "enable debug messages [video]"); /* supported video standards */ static struct em28xx_fmt format[] = { { .name = "16 bpp YUY2, 4:2:2, packed", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .reg = EM28XX_OUTFMT_YUV422_Y0UY1V, }, { .name = "16 bpp RGB 565, LE", .fourcc = V4L2_PIX_FMT_RGB565, .depth = 16, .reg = EM28XX_OUTFMT_RGB_16_656, }, { .name = "8 bpp Bayer BGBG..GRGR", .fourcc = V4L2_PIX_FMT_SBGGR8, .depth = 8, .reg = EM28XX_OUTFMT_RGB_8_BGBG, }, { .name = "8 bpp Bayer GRGR..BGBG", .fourcc = V4L2_PIX_FMT_SGRBG8, .depth = 8, .reg = EM28XX_OUTFMT_RGB_8_GRGR, }, { .name = "8 bpp Bayer GBGB..RGRG", .fourcc = V4L2_PIX_FMT_SGBRG8, .depth = 8, .reg = EM28XX_OUTFMT_RGB_8_GBGB, }, { .name = "12 bpp YUV411", .fourcc = V4L2_PIX_FMT_YUV411P, .depth = 12, .reg = EM28XX_OUTFMT_YUV411, }, }; /* supported controls */ /* Common to all boards */ static struct v4l2_queryctrl ac97_qctrl[] = { { .id = 
V4L2_CID_AUDIO_VOLUME, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Volume", .minimum = 0x0, .maximum = 0x1f, .step = 0x1, .default_value = 0x1f, .flags = V4L2_CTRL_FLAG_SLIDER, }, { .id = V4L2_CID_AUDIO_MUTE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mute", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, .flags = 0, } }; /* ------------------------------------------------------------------ DMA and thread functions ------------------------------------------------------------------*/ /* * Announces that a buffer were filled and request the next */ static inline void buffer_filled(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf) { /* Advice that buffer was filled */ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); dev->isoc_ctl.vid_buf = NULL; list_del(&buf->vb.queue); wake_up(&buf->vb.done); } static inline void vbi_buffer_filled(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf) { /* Advice that buffer was filled */ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); dev->isoc_ctl.vbi_buf = NULL; list_del(&buf->vb.queue); wake_up(&buf->vb.done); } /* * Identify the buffer header type and properly handles */ static void em28xx_copy_video(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf, unsigned char *p, unsigned char *outp, unsigned long len) { void *fieldstart, *startwrite, *startread; int linesdone, currlinedone, offset, lencopy, remain; int bytesperline = dev->width << 1; if (dma_q->pos + len > buf->vb.size) len = buf->vb.size - dma_q->pos; startread = p; remain = len; if (dev->progressive) fieldstart = outp; else { /* Interlaces two half frames */ if (buf->top_field) fieldstart = outp; else fieldstart = outp + bytesperline; } linesdone = dma_q->pos / bytesperline; currlinedone = dma_q->pos % bytesperline; 
if (dev->progressive) offset = linesdone * bytesperline + currlinedone; else offset = linesdone * bytesperline * 2 + currlinedone; startwrite = fieldstart + offset; lencopy = bytesperline - currlinedone; lencopy = lencopy > remain ? remain : lencopy; if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) { em28xx_isocdbg("Overflow of %zi bytes past buffer end (1)\n", ((char *)startwrite + lencopy) - ((char *)outp + buf->vb.size)); remain = (char *)outp + buf->vb.size - (char *)startwrite; lencopy = remain; } if (lencopy <= 0) return; memcpy(startwrite, startread, lencopy); remain -= lencopy; while (remain > 0) { startwrite += lencopy + bytesperline; startread += lencopy; if (bytesperline > remain) lencopy = remain; else lencopy = bytesperline; if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) { em28xx_isocdbg("Overflow of %zi bytes past buffer end" "(2)\n", ((char *)startwrite + lencopy) - ((char *)outp + buf->vb.size)); lencopy = remain = (char *)outp + buf->vb.size - (char *)startwrite; } if (lencopy <= 0) break; memcpy(startwrite, startread, lencopy); remain -= lencopy; } dma_q->pos += len; } static void em28xx_copy_vbi(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf, unsigned char *p, unsigned char *outp, unsigned long len) { void *startwrite, *startread; int offset; int bytesperline = dev->vbi_width; if (dev == NULL) { em28xx_isocdbg("dev is null\n"); return; } if (dma_q == NULL) { em28xx_isocdbg("dma_q is null\n"); return; } if (buf == NULL) { return; } if (p == NULL) { em28xx_isocdbg("p is null\n"); return; } if (outp == NULL) { em28xx_isocdbg("outp is null\n"); return; } if (dma_q->pos + len > buf->vb.size) len = buf->vb.size - dma_q->pos; startread = p; startwrite = outp + dma_q->pos; offset = dma_q->pos; /* Make sure the bottom field populates the second half of the frame */ if (buf->top_field == 0) { startwrite += bytesperline * dev->vbi_height; offset += bytesperline * dev->vbi_height; } 
memcpy(startwrite, startread, len); dma_q->pos += len; } static inline void print_err_status(struct em28xx *dev, int packet, int status) { char *errmsg = "Unknown"; switch (status) { case -ENOENT: errmsg = "unlinked synchronuously"; break; case -ECONNRESET: errmsg = "unlinked asynchronuously"; break; case -ENOSR: errmsg = "Buffer error (overrun)"; break; case -EPIPE: errmsg = "Stalled (device not responding)"; break; case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break; case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break; case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; case -ETIME: errmsg = "Device does not respond"; break; } if (packet < 0) { em28xx_isocdbg("URB status %d [%s].\n", status, errmsg); } else { em28xx_isocdbg("URB packet %d, status %d [%s].\n", packet, status, errmsg); } } /* * video-buf generic routine to get the next available buffer */ static inline void get_next_buf(struct em28xx_dmaqueue *dma_q, struct em28xx_buffer **buf) { struct em28xx *dev = container_of(dma_q, struct em28xx, vidq); char *outp; if (list_empty(&dma_q->active)) { em28xx_isocdbg("No active queue to serve\n"); dev->isoc_ctl.vid_buf = NULL; *buf = NULL; return; } /* Get the next buffer */ *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue); /* Cleans up buffer - Usefull for testing for frame/URB loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); memset(outp, 0, (*buf)->vb.size); dev->isoc_ctl.vid_buf = *buf; return; } /* * video-buf generic routine to get the next available VBI buffer */ static inline void vbi_get_next_buf(struct em28xx_dmaqueue *dma_q, struct em28xx_buffer **buf) { struct em28xx *dev = container_of(dma_q, struct em28xx, vbiq); char *outp; if (list_empty(&dma_q->active)) { em28xx_isocdbg("No active queue to serve\n"); dev->isoc_ctl.vbi_buf = NULL; *buf = NULL; return; } /* Get the next buffer */ *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue); /* Cleans up buffer - Usefull for testing for frame/URB 
loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); memset(outp, 0x00, (*buf)->vb.size); dev->isoc_ctl.vbi_buf = *buf; return; } /* * Controls the isoc copy of each urb packet */ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb) { struct em28xx_buffer *buf; struct em28xx_dmaqueue *dma_q = &dev->vidq; unsigned char *outp = NULL; int i, len = 0, rc = 1; unsigned char *p; if (!dev) return 0; if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED)) return 0; if (urb->status < 0) { print_err_status(dev, -1, urb->status); if (urb->status == -ENOENT) return 0; } buf = dev->isoc_ctl.vid_buf; if (buf != NULL) outp = videobuf_to_vmalloc(&buf->vb); for (i = 0; i < urb->number_of_packets; i++) { int status = urb->iso_frame_desc[i].status; if (status < 0) { print_err_status(dev, i, status); if (urb->iso_frame_desc[i].status != -EPROTO) continue; } len = urb->iso_frame_desc[i].actual_length - 4; if (urb->iso_frame_desc[i].actual_length <= 0) { /* em28xx_isocdbg("packet %d is empty",i); - spammy */ continue; } if (urb->iso_frame_desc[i].actual_length > dev->max_pkt_size) { em28xx_isocdbg("packet bigger than packet size"); continue; } p = urb->transfer_buffer + urb->iso_frame_desc[i].offset; /* FIXME: incomplete buffer checks where removed to make logic simpler. Impacts of those changes should be evaluated */ if (p[0] == 0x33 && p[1] == 0x95 && p[2] == 0x00) { em28xx_isocdbg("VBI HEADER!!!\n"); /* FIXME: Should add vbi copy */ continue; } if (p[0] == 0x22 && p[1] == 0x5a) { em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2], len, (p[2] & 1) ? 
"odd" : "even"); if (dev->progressive || !(p[2] & 1)) { if (buf != NULL) buffer_filled(dev, dma_q, buf); get_next_buf(dma_q, &buf); if (buf == NULL) outp = NULL; else outp = videobuf_to_vmalloc(&buf->vb); } if (buf != NULL) { if (p[2] & 1) buf->top_field = 0; else buf->top_field = 1; } dma_q->pos = 0; } if (buf != NULL) { if (p[0] != 0x88 && p[0] != 0x22) { em28xx_isocdbg("frame is not complete\n"); len += 4; } else { p += 4; } em28xx_copy_video(dev, dma_q, buf, p, outp, len); } } return rc; } /* Version of isoc handler that takes into account a mixture of video and VBI data */ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb) { struct em28xx_buffer *buf, *vbi_buf; struct em28xx_dmaqueue *dma_q = &dev->vidq; struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq; unsigned char *outp = NULL; unsigned char *vbioutp = NULL; int i, len = 0, rc = 1; unsigned char *p; int vbi_size; if (!dev) return 0; if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED)) return 0; if (urb->status < 0) { print_err_status(dev, -1, urb->status); if (urb->status == -ENOENT) return 0; } buf = dev->isoc_ctl.vid_buf; if (buf != NULL) outp = videobuf_to_vmalloc(&buf->vb); vbi_buf = dev->isoc_ctl.vbi_buf; if (vbi_buf != NULL) vbioutp = videobuf_to_vmalloc(&vbi_buf->vb); for (i = 0; i < urb->number_of_packets; i++) { int status = urb->iso_frame_desc[i].status; if (status < 0) { print_err_status(dev, i, status); if (urb->iso_frame_desc[i].status != -EPROTO) continue; } len = urb->iso_frame_desc[i].actual_length; if (urb->iso_frame_desc[i].actual_length <= 0) { /* em28xx_isocdbg("packet %d is empty",i); - spammy */ continue; } if (urb->iso_frame_desc[i].actual_length > dev->max_pkt_size) { em28xx_isocdbg("packet bigger than packet size"); continue; } p = urb->transfer_buffer + urb->iso_frame_desc[i].offset; /* capture type 0 = vbi start capture type 1 = video start capture type 2 = video in progress */ if (p[0] == 0x33 && p[1] == 0x95) { dev->capture_type = 0; 
dev->vbi_read = 0; em28xx_isocdbg("VBI START HEADER!!!\n"); dev->cur_field = p[2]; p += 4; len -= 4; } else if (p[0] == 0x88 && p[1] == 0x88 && p[2] == 0x88 && p[3] == 0x88) { /* continuation */ p += 4; len -= 4; } else if (p[0] == 0x22 && p[1] == 0x5a) { /* start video */ p += 4; len -= 4; } vbi_size = dev->vbi_width * dev->vbi_height; if (dev->capture_type == 0) { if (dev->vbi_read >= vbi_size) { /* We've already read all the VBI data, so treat the rest as video */ em28xx_isocdbg("dev->vbi_read > vbi_size\n"); } else if ((dev->vbi_read + len) < vbi_size) { /* This entire frame is VBI data */ if (dev->vbi_read == 0 && (!(dev->cur_field & 1))) { /* Brand new frame */ if (vbi_buf != NULL) vbi_buffer_filled(dev, vbi_dma_q, vbi_buf); vbi_get_next_buf(vbi_dma_q, &vbi_buf); if (vbi_buf == NULL) vbioutp = NULL; else vbioutp = videobuf_to_vmalloc( &vbi_buf->vb); } if (dev->vbi_read == 0) { vbi_dma_q->pos = 0; if (vbi_buf != NULL) { if (dev->cur_field & 1) vbi_buf->top_field = 0; else vbi_buf->top_field = 1; } } dev->vbi_read += len; em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p, vbioutp, len); } else { /* Some of this frame is VBI data and some is video data */ int vbi_data_len = vbi_size - dev->vbi_read; dev->vbi_read += vbi_data_len; em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p, vbioutp, vbi_data_len); dev->capture_type = 1; p += vbi_data_len; len -= vbi_data_len; } } if (dev->capture_type == 1) { dev->capture_type = 2; if (dev->progressive || !(dev->cur_field & 1)) { if (buf != NULL) buffer_filled(dev, dma_q, buf); get_next_buf(dma_q, &buf); if (buf == NULL) outp = NULL; else outp = videobuf_to_vmalloc(&buf->vb); } if (buf != NULL) { if (dev->cur_field & 1) buf->top_field = 0; else buf->top_field = 1; } dma_q->pos = 0; } if (buf != NULL && dev->capture_type == 2) { if (len > 4 && p[0] == 0x88 && p[1] == 0x88 && p[2] == 0x88 && p[3] == 0x88) { p += 4; len -= 4; } if (len > 4 && p[0] == 0x22 && p[1] == 0x5a) { em28xx_isocdbg("Video frame %d, len=%i, %s\n", p[2], len, (p[2] & 
1) ? "odd" : "even"); p += 4; len -= 4; } if (len > 0) em28xx_copy_video(dev, dma_q, buf, p, outp, len); } } return rc; } /* ------------------------------------------------------------------ Videobuf operations ------------------------------------------------------------------*/ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = fh->dev; struct v4l2_frequency f; *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7) >> 3; if (0 == *count) *count = EM28XX_DEF_BUF; if (*count < EM28XX_MIN_BUF) *count = EM28XX_MIN_BUF; /* Ask tuner to go to analog or radio mode */ memset(&f, 0, sizeof(f)); f.frequency = dev->ctl_freq; f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); return 0; } /* This is called *without* dev->slock held; please keep it that way */ static void free_buffer(struct videobuf_queue *vq, struct em28xx_buffer *buf) { struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = fh->dev; unsigned long flags = 0; if (in_interrupt()) BUG(); /* We used to wait for the buffer to finish here, but this didn't work because, as we were keeping the state as VIDEOBUF_QUEUED, videobuf_queue_cancel marked it as finished for us. (Also, it could wedge forever if the hardware was misconfigured.) This should be safe; by the time we get here, the buffer isn't queued anymore. If we ever start marking the buffers as VIDEOBUF_ACTIVE, it won't be, though. 
*/ spin_lock_irqsave(&dev->slock, flags); if (dev->isoc_ctl.vid_buf == buf) dev->isoc_ctl.vid_buf = NULL; spin_unlock_irqrestore(&dev->slock, flags); videobuf_vmalloc_free(&buf->vb); buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct em28xx_fh *fh = vq->priv_data; struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx *dev = fh->dev; int rc = 0, urb_init = 0; buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth + 7) >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; buf->vb.width = dev->width; buf->vb.height = dev->height; buf->vb.field = field; if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { rc = videobuf_iolock(vq, &buf->vb, NULL); if (rc < 0) goto fail; } if (!dev->isoc_ctl.num_bufs) urb_init = 1; if (urb_init) { if (em28xx_vbi_supported(dev) == 1) rc = em28xx_init_isoc(dev, EM28XX_NUM_PACKETS, EM28XX_NUM_BUFS, dev->max_pkt_size, em28xx_isoc_copy_vbi); else rc = em28xx_init_isoc(dev, EM28XX_NUM_PACKETS, EM28XX_NUM_BUFS, dev->max_pkt_size, em28xx_isoc_copy); if (rc < 0) goto fail; } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: free_buffer(vq, buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = fh->dev; struct em28xx_dmaqueue *vidq = &dev->vidq; buf->vb.state = VIDEOBUF_QUEUED; list_add_tail(&buf->vb.queue, &vidq->active); } static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = (struct em28xx *)fh->dev; em28xx_isocdbg("em28xx: called buffer_release\n"); free_buffer(vq, buf); } static struct videobuf_queue_ops em28xx_video_qops = { .buf_setup = 
buffer_setup,
	.buf_prepare = buffer_prepare,
	.buf_queue   = buffer_queue,
	.buf_release = buffer_release,
};

/********************* v4l2 interface **************************************/

/*
 * Select video input 'index': program the video mux and the matching
 * audio input/output routing on the sub-devices, then apply the analog
 * audio settings.
 */
static void video_mux(struct em28xx *dev, int index)
{
	dev->ctl_input = index;
	dev->ctl_ainput = INPUT(index)->amux;
	dev->ctl_aoutput = INPUT(index)->aout;

	/* fall back to the master audio output when none is configured */
	if (!dev->ctl_aoutput)
		dev->ctl_aoutput = EM28XX_AOUT_MASTER;

	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
			INPUT(index)->vmux, 0, 0);

	if (dev->board.has_msp34xx) {
		if (dev->i2s_speed) {
			v4l2_device_call_all(&dev->v4l2_dev, 0, audio,
				s_i2s_clock_freq, dev->i2s_speed);
		}
		/* Note: this is msp3400 specific */
		v4l2_device_call_all(&dev->v4l2_dev, 0, audio, s_routing,
			dev->ctl_ainput, MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0);
	}

	if (dev->board.adecoder != EM28XX_NOADECODER) {
		v4l2_device_call_all(&dev->v4l2_dev, 0, audio, s_routing,
			dev->ctl_ainput, dev->ctl_aoutput, 0);
	}

	em28xx_audio_analog_set(dev);
}

/* Usage lock check functions */

/*
 * Try to grab resource bit 'bit' for filehandle 'fh'.
 * Returns 1 on success (or if this filehandle already owns the bit),
 * 0 when another filehandle holds it.  Called without dev->lock held.
 */
static int res_get(struct em28xx_fh *fh, unsigned int bit)
{
	struct em28xx    *dev = fh->dev;

	if (fh->resources & bit)
		/* have it already allocated */
		return 1;

	/* is it free? */
	mutex_lock(&dev->lock);
	if (dev->resources & bit) {
		/* no, someone else uses it */
		mutex_unlock(&dev->lock);
		return 0;
	}
	/* it's free, grab it */
	fh->resources  |= bit;
	dev->resources |= bit;
	em28xx_videodbg("res: get %d\n", bit);
	mutex_unlock(&dev->lock);
	return 1;
}

/* Does this filehandle own resource bit 'bit'? */
static int res_check(struct em28xx_fh *fh, unsigned int bit)
{
	return fh->resources & bit;
}

/* Is resource bit 'bit' held by any filehandle on this device? */
static int res_locked(struct em28xx *dev, unsigned int bit)
{
	return dev->resources & bit;
}

/*
 * Release the resource bits in 'bits'.  The caller must own every
 * bit it releases (enforced by the BUG_ON).
 */
static void res_free(struct em28xx_fh *fh, unsigned int bits)
{
	struct em28xx    *dev = fh->dev;

	BUG_ON((fh->resources & bits) != bits);

	mutex_lock(&dev->lock);
	fh->resources  &= ~bits;
	dev->resources &= ~bits;
	em28xx_videodbg("res: put %d\n", bits);
	mutex_unlock(&dev->lock);
}

/* Map the filehandle's buffer type to the matching resource bit */
static int get_ressource(struct em28xx_fh *fh)
{
	switch (fh->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		return EM28XX_RESOURCE_VIDEO;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
		return EM28XX_RESOURCE_VBI;
	default:
		BUG();
		return 0;
	}
}

/*
 * ac97_queryctrl()
 * return the ac97 supported controls
 * (0 = handled, 1 = control is not ac97 related)
 */
static int ac97_queryctrl(struct v4l2_queryctrl *qc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) {
		if (qc->id && qc->id == ac97_qctrl[i].id) {
			memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc));
			return 0;
		}
	}

	/* Control is not ac97 related */
	return 1;
}

/*
 * ac97_get_ctrl()
 * return the current values for ac97 mute and volume
 * (0 = handled, 1 = control is not ac97 related)
 */
static int ac97_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
{
	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		ctrl->value = dev->mute;
		return 0;
	case V4L2_CID_AUDIO_VOLUME:
		ctrl->value = dev->volume;
		return 0;
	default:
		/* Control is not ac97 related */
		return 1;
	}
}

/*
 * ac97_set_ctrl()
 * set values for ac97 mute and volume
 */
static int ac97_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++)
		if (ctrl->id == ac97_qctrl[i].id)
			goto handle;

	/* Announce that this control was not handled here */
	return 1;

handle:
	/* range-check against the matching ac97 control descriptor */
	if (ctrl->value < ac97_qctrl[i].minimum ||
	    ctrl->value >
ac97_qctrl[i].maximum)
		return -ERANGE;

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		dev->mute = ctrl->value;
		break;
	case V4L2_CID_AUDIO_VOLUME:
		dev->volume = ctrl->value;
		break;
	}

	/* push the new mute/volume state to the hardware */
	return em28xx_audio_analog_set(dev);
}

/*
 * Common ioctl precondition check: fail with -ENODEV if the USB
 * device was unplugged, or -EIO if it is in a misconfigured state.
 */
static int check_dev(struct em28xx *dev)
{
	if (dev->state & DEV_DISCONNECTED) {
		em28xx_errdev("v4l2 ioctl: device not present\n");
		return -ENODEV;
	}

	if (dev->state & DEV_MISCONFIGURED) {
		em28xx_errdev("v4l2 ioctl: device is misconfigured; "
			      "close and open it again\n");
		return -EIO;
	}
	return 0;
}

/*
 * Compute the H/V scaler coefficients for the requested width/height:
 * (max_size << 12) / size - 4096, clamped to the 14-bit range 0x3fff.
 * max_size comes from the current video standard (norm_maxw/norm_maxh).
 */
static void get_scale(struct em28xx *dev,
			unsigned int width, unsigned int height,
			unsigned int *hscale, unsigned int *vscale)
{
	unsigned int maxw = norm_maxw(dev);
	unsigned int maxh = norm_maxh(dev);

	*hscale = (((unsigned long)maxw) << 12) / width - 4096L;
	if (*hscale >= 0x4000)
		*hscale = 0x3fff;

	*vscale = (((unsigned long)maxh) << 12) / height - 4096L;
	if (*vscale >= 0x4000)
		*vscale = 0x3fff;
}

/* ------------------------------------------------------------------
	IOCTL vidioc handling
   ------------------------------------------------------------------*/

/* VIDIOC_G_FMT handler: report the currently configured capture format */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct em28xx_fh      *fh  = priv;
	struct em28xx         *dev = fh->dev;

	mutex_lock(&dev->lock);

	f->fmt.pix.width = dev->width;
	f->fmt.pix.height = dev->height;
	f->fmt.pix.pixelformat = dev->format->fourcc;
	f->fmt.pix.bytesperline = (dev->width * dev->format->depth + 7) >> 3;
	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * dev->height;
	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;

	/* FIXME: TOP? NONE? BOTTOM? ALTENATE? */
	if (dev->progressive)
		f->fmt.pix.field = V4L2_FIELD_NONE;
	else
		f->fmt.pix.field = dev->interlaced ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; mutex_unlock(&dev->lock); return 0; } static struct em28xx_fmt *format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(format); i++) if (format[i].fourcc == fourcc) return &format[i]; return NULL; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; unsigned int width = f->fmt.pix.width; unsigned int height = f->fmt.pix.height; unsigned int maxw = norm_maxw(dev); unsigned int maxh = norm_maxh(dev); unsigned int hscale, vscale; struct em28xx_fmt *fmt; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (!fmt) { em28xx_videodbg("Fourcc format (%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } if (dev->board.is_em2800) { /* the em2800 can only scale down to 50% */ height = height > (3 * maxh / 4) ? maxh : maxh / 2; width = width > (3 * maxw / 4) ? maxw : maxw / 2; } else { /* width must even because of the YUYV format height must be even because of interlacing */ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0); } get_scale(dev, width, height, &hscale, &vscale); width = (((unsigned long)maxw) << 12) / (hscale + 4096L); height = (((unsigned long)maxh) << 12) / (vscale + 4096L); f->fmt.pix.width = width; f->fmt.pix.height = height; f->fmt.pix.pixelformat = fmt->fourcc; f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; if (dev->progressive) f->fmt.pix.field = V4L2_FIELD_NONE; else f->fmt.pix.field = dev->interlaced ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; return 0; } static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc, unsigned width, unsigned height) { struct em28xx_fmt *fmt; fmt = format_by_fourcc(fourcc); if (!fmt) return -EINVAL; dev->format = fmt; dev->width = width; dev->height = height; /* set new image size */ get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); em28xx_set_alternate(dev); em28xx_resolution_set(dev); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; mutex_lock(&dev->lock); vidioc_try_fmt_vid_cap(file, priv, f); if (videobuf_queue_is_busy(&fh->vb_vidq)) { em28xx_errdev("%s queue busy\n", __func__); rc = -EBUSY; goto out; } rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height); out: mutex_unlock(&dev->lock); return rc; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; *norm = dev->norm; return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; struct v4l2_format f; int rc; rc = check_dev(dev); if (rc < 0) return rc; mutex_lock(&dev->lock); dev->norm = *norm; /* Adjusts width/height, if needed */ f.fmt.pix.width = dev->width; f.fmt.pix.height = dev->height; vidioc_try_fmt_vid_cap(file, priv, &f); /* set new image size */ dev->width = f.fmt.pix.width; dev->height = f.fmt.pix.height; get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); em28xx_resolution_set(dev); v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm); mutex_unlock(&dev->lock); return 0; } static int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { struct em28xx_fh *fh = priv; struct 
em28xx *dev = fh->dev; int rc = 0; if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (dev->board.is_webcam) rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, g_parm, p); else v4l2_video_std_frame_period(dev->norm, &p->parm.capture.timeperframe); return rc; } static int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (!dev->board.is_webcam) return -EINVAL; if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p); } static const char *iname[] = { [EM28XX_VMUX_COMPOSITE1] = "Composite1", [EM28XX_VMUX_COMPOSITE2] = "Composite2", [EM28XX_VMUX_COMPOSITE3] = "Composite3", [EM28XX_VMUX_COMPOSITE4] = "Composite4", [EM28XX_VMUX_SVIDEO] = "S-Video", [EM28XX_VMUX_TELEVISION] = "Television", [EM28XX_VMUX_CABLE] = "Cable TV", [EM28XX_VMUX_DVB] = "DVB", [EM28XX_VMUX_DEBUG] = "for debug only", }; static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; unsigned int n; n = i->index; if (n >= MAX_EM28XX_INPUT) return -EINVAL; if (0 == INPUT(n)->type) return -EINVAL; i->index = n; i->type = V4L2_INPUT_TYPE_CAMERA; strcpy(i->name, iname[INPUT(n)->type]); if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) || (EM28XX_VMUX_CABLE == INPUT(n)->type)) i->type = V4L2_INPUT_TYPE_TUNER; i->std = dev->vdev->tvnorms; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; *i = dev->ctl_input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (i >= MAX_EM28XX_INPUT) return -EINVAL; if (0 == INPUT(i)->type) return -EINVAL; dev->ctl_input = i; mutex_lock(&dev->lock); video_mux(dev, dev->ctl_input); 
mutex_unlock(&dev->lock); return 0; } static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (!dev->audio_mode.has_audio) return -EINVAL; switch (a->index) { case EM28XX_AMUX_VIDEO: strcpy(a->name, "Television"); break; case EM28XX_AMUX_LINE_IN: strcpy(a->name, "Line In"); break; case EM28XX_AMUX_VIDEO2: strcpy(a->name, "Television alt"); break; case EM28XX_AMUX_PHONE: strcpy(a->name, "Phone"); break; case EM28XX_AMUX_MIC: strcpy(a->name, "Mic"); break; case EM28XX_AMUX_CD: strcpy(a->name, "CD"); break; case EM28XX_AMUX_AUX: strcpy(a->name, "Aux"); break; case EM28XX_AMUX_PCM_OUT: strcpy(a->name, "PCM"); break; default: return -EINVAL; } a->index = dev->ctl_ainput; a->capability = V4L2_AUDCAP_STEREO; return 0; } static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (!dev->audio_mode.has_audio) return -EINVAL; if (a->index >= MAX_EM28XX_INPUT) return -EINVAL; if (0 == INPUT(a->index)->type) return -EINVAL; mutex_lock(&dev->lock); dev->ctl_ainput = INPUT(a->index)->amux; dev->ctl_aoutput = INPUT(a->index)->aout; if (!dev->ctl_aoutput) dev->ctl_aoutput = EM28XX_AOUT_MASTER; mutex_unlock(&dev->lock); return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int id = qc->id; int rc; rc = check_dev(dev); if (rc < 0) return rc; memset(qc, 0, sizeof(*qc)); qc->id = id; /* enumberate AC97 controls */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { rc = ac97_queryctrl(qc); if (!rc) return 0; } /* enumberate V4L2 device controls */ mutex_lock(&dev->lock); v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, qc); mutex_unlock(&dev->lock); if (qc->type) return 0; else return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct em28xx_fh *fh = priv; 
struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; rc = 0; mutex_lock(&dev->lock); /* Set an AC97 control */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) rc = ac97_get_ctrl(dev, ctrl); else rc = 1; /* It were not an AC97 control. Sends it to the v4l2 dev interface */ if (rc == 1) { v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_ctrl, ctrl); rc = 0; } mutex_unlock(&dev->lock); return rc; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; mutex_lock(&dev->lock); /* Set an AC97 control */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) rc = ac97_set_ctrl(dev, ctrl); else rc = 1; /* It isn't an AC97 control. Sends it to the v4l2 dev interface */ if (rc == 1) { v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_ctrl, ctrl); /* * In the case of non-AC97 volume controls, we still need * to do some setups at em28xx, in order to mute/unmute * and to adjust audio volume. However, the value ranges * should be checked by the corresponding V4L subdriver. 
*/ switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: dev->mute = ctrl->value; rc = em28xx_audio_analog_set(dev); break; case V4L2_CID_AUDIO_VOLUME: dev->volume = ctrl->value; rc = em28xx_audio_analog_set(dev); } } mutex_unlock(&dev->lock); return rc; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (0 != t->index) return -EINVAL; strcpy(t->name, "Tuner"); t->type = V4L2_TUNER_ANALOG_TV; mutex_lock(&dev->lock); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t); mutex_unlock(&dev->lock); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (0 != t->index) return -EINVAL; mutex_lock(&dev->lock); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t); mutex_unlock(&dev->lock); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; mutex_lock(&dev->lock); f->type = fh->radio ? 
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = dev->ctl_freq; mutex_unlock(&dev->lock); return 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (0 != f->tuner) return -EINVAL; if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV)) return -EINVAL; if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO)) return -EINVAL; mutex_lock(&dev->lock); dev->ctl_freq = f->frequency; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f); mutex_unlock(&dev->lock); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int em28xx_reg_len(int reg) { switch (reg) { case EM28XX_R40_AC97LSB: case EM28XX_R30_HSCALELOW: case EM28XX_R32_VSCALELOW: return 2; default: return 1; } } static int vidioc_g_chip_ident(struct file *file, void *priv, struct v4l2_dbg_chip_ident *chip) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; chip->ident = V4L2_IDENT_NONE; chip->revision = 0; v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_chip_ident, chip); return 0; } static int vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int ret; switch (reg->match.type) { case V4L2_CHIP_MATCH_AC97: mutex_lock(&dev->lock); ret = em28xx_read_ac97(dev, reg->reg); mutex_unlock(&dev->lock); if (ret < 0) return ret; reg->val = ret; reg->size = 1; return 0; case V4L2_CHIP_MATCH_I2C_DRIVER: v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_register, reg); return 0; case V4L2_CHIP_MATCH_I2C_ADDR: /* TODO: is this correct? 
*/ v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_register, reg); return 0; default: if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; } /* Match host */ reg->size = em28xx_reg_len(reg->reg); if (reg->size == 1) { mutex_lock(&dev->lock); ret = em28xx_read_reg(dev, reg->reg); mutex_unlock(&dev->lock); if (ret < 0) return ret; reg->val = ret; } else { __le16 val = 0; mutex_lock(&dev->lock); ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS, reg->reg, (char *)&val, 2); mutex_unlock(&dev->lock); if (ret < 0) return ret; reg->val = le16_to_cpu(val); } return 0; } static int vidioc_s_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; __le16 buf; int rc; switch (reg->match.type) { case V4L2_CHIP_MATCH_AC97: mutex_lock(&dev->lock); rc = em28xx_write_ac97(dev, reg->reg, reg->val); mutex_unlock(&dev->lock); return rc; case V4L2_CHIP_MATCH_I2C_DRIVER: v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_register, reg); return 0; case V4L2_CHIP_MATCH_I2C_ADDR: /* TODO: is this correct? 
*/ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_register, reg); return 0; default: if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; } /* Match host */ buf = cpu_to_le16(reg->val); mutex_lock(&dev->lock); rc = em28xx_write_regs(dev, reg->reg, (char *)&buf, em28xx_reg_len(reg->reg)); mutex_unlock(&dev->lock); return rc; } #endif static int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cc) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; cc->bounds.left = 0; cc->bounds.top = 0; cc->bounds.width = dev->width; cc->bounds.height = dev->height; cc->defrect = cc->bounds; cc->pixelaspect.numerator = 54; /* 4:3 FIXME: remove magic numbers */ cc->pixelaspect.denominator = 59; return 0; } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc = -EINVAL; rc = check_dev(dev); if (rc < 0) return rc; if (unlikely(type != fh->type)) return -EINVAL; em28xx_videodbg("vidioc_streamon fh=%p t=%d fh->res=%d dev->res=%d\n", fh, type, fh->resources, dev->resources); if (unlikely(!res_get(fh, get_ressource(fh)))) return -EBUSY; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) rc = videobuf_streamon(&fh->vb_vidq); else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) rc = videobuf_streamon(&fh->vb_vbiq); return rc; } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && fh->type != V4L2_BUF_TYPE_VBI_CAPTURE) return -EINVAL; if (type != fh->type) return -EINVAL; em28xx_videodbg("vidioc_streamoff fh=%p t=%d fh->res=%d dev->res=%d\n", fh, type, fh->resources, dev->resources); if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { videobuf_streamoff(&fh->vb_vidq); res_free(fh, EM28XX_RESOURCE_VIDEO); } else if (fh->type == 
V4L2_BUF_TYPE_VBI_CAPTURE) { videobuf_streamoff(&fh->vb_vbiq); res_free(fh, EM28XX_RESOURCE_VBI); } return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); cap->version = EM28XX_VERSION_CODE; cap->capabilities = V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (dev->vbi_dev) cap->capabilities |= V4L2_CAP_VBI_CAPTURE; if (dev->audio_mode.has_audio) cap->capabilities |= V4L2_CAP_AUDIO; if (dev->tuner_type != TUNER_ABSENT) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(format))) return -EINVAL; strlcpy(f->description, format[f->index].name, sizeof(f->description)); f->pixelformat = format[f->index].fourcc; return 0; } /* Sliced VBI ioctls */ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; mutex_lock(&dev->lock); f->fmt.sliced.service_set = 0; v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced); if (f->fmt.sliced.service_set == 0) rc = -EINVAL; mutex_unlock(&dev->lock); return rc; } static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; mutex_lock(&dev->lock); v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced); mutex_unlock(&dev->lock); if (f->fmt.sliced.service_set == 0) return -EINVAL; return 0; } /* RAW VBI ioctls */ static int 
vidioc_g_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *format) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; format->fmt.vbi.samples_per_line = dev->vbi_width; format->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY; format->fmt.vbi.offset = 0; format->fmt.vbi.flags = 0; format->fmt.vbi.sampling_rate = 6750000 * 4 / 2; format->fmt.vbi.count[0] = dev->vbi_height; format->fmt.vbi.count[1] = dev->vbi_height; /* Varies by video standard (NTSC, PAL, etc.) */ if (dev->norm & V4L2_STD_525_60) { /* NTSC */ format->fmt.vbi.start[0] = 10; format->fmt.vbi.start[1] = 273; } else if (dev->norm & V4L2_STD_625_50) { /* PAL */ format->fmt.vbi.start[0] = 6; format->fmt.vbi.start[1] = 318; } return 0; } static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *format) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; format->fmt.vbi.samples_per_line = dev->vbi_width; format->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY; format->fmt.vbi.offset = 0; format->fmt.vbi.flags = 0; format->fmt.vbi.sampling_rate = 6750000 * 4 / 2; format->fmt.vbi.count[0] = dev->vbi_height; format->fmt.vbi.count[1] = dev->vbi_height; /* Varies by video standard (NTSC, PAL, etc.) 
*/ if (dev->norm & V4L2_STD_525_60) { /* NTSC */ format->fmt.vbi.start[0] = 10; format->fmt.vbi.start[1] = 273; } else if (dev->norm & V4L2_STD_625_50) { /* PAL */ format->fmt.vbi.start[0] = 6; format->fmt.vbi.start[1] = 318; } return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_reqbufs(&fh->vb_vidq, rb); else return videobuf_reqbufs(&fh->vb_vbiq, rb); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_querybuf(&fh->vb_vidq, b); else { /* FIXME: I'm not sure yet whether this is a bug in zvbi or the videobuf framework, but we probably shouldn't be returning a buffer larger than that which was asked for. 
At a minimum, it causes a crash in zvbi since it does a memcpy based on the source buffer length */ int result = videobuf_querybuf(&fh->vb_vbiq, b); b->length = dev->vbi_width * dev->vbi_height * 2; return result; } } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_qbuf(&fh->vb_vidq, b); else return videobuf_qbuf(&fh->vb_vbiq, b); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); else return videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK); } #ifdef CONFIG_VIDEO_V4L1_COMPAT static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf) { struct em28xx_fh *fh = priv; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_cgmbuf(&fh->vb_vidq, mbuf, 8); else return videobuf_cgmbuf(&fh->vb_vbiq, mbuf, 8); } #endif /* ----------------------------------------------------------- */ /* RADIO ESPECIFIC IOCTLS */ /* ----------------------------------------------------------- */ static int radio_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct em28xx *dev = ((struct em28xx_fh *)priv)->dev; strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); cap->version = EM28XX_VERSION_CODE; cap->capabilities = V4L2_CAP_TUNER; return 0; } static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx *dev = ((struct em28xx_fh *)priv)->dev; if (unlikely(t->index > 0)) return -EINVAL; strcpy(t->name, "Radio"); t->type 
= V4L2_TUNER_RADIO; mutex_lock(&dev->lock); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t); mutex_unlock(&dev->lock); return 0; } static int radio_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; strcpy(i->name, "Radio"); i->type = V4L2_INPUT_TYPE_TUNER; return 0; } static int radio_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; strcpy(a->name, "Radio"); return 0; } static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx *dev = ((struct em28xx_fh *)priv)->dev; if (0 != t->index) return -EINVAL; mutex_lock(&dev->lock); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t); mutex_unlock(&dev->lock); return 0; } static int radio_s_audio(struct file *file, void *fh, struct v4l2_audio *a) { return 0; } static int radio_s_input(struct file *file, void *fh, unsigned int i) { return 0; } static int radio_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { int i; if (qc->id < V4L2_CID_BASE || qc->id >= V4L2_CID_LASTP1) return -EINVAL; for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) { if (qc->id && qc->id == ac97_qctrl[i].id) { memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc)); return 0; } } return -EINVAL; } /* * em28xx_v4l2_open() * inits the device and starts isoc transfer */ static int em28xx_v4l2_open(struct file *filp) { int errCode = 0, radio = 0; struct video_device *vdev = video_devdata(filp); struct em28xx *dev = video_drvdata(filp); enum v4l2_buf_type fh_type = 0; struct em28xx_fh *fh; enum v4l2_field field; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: fh_type = V4L2_BUF_TYPE_VBI_CAPTURE; break; case VFL_TYPE_RADIO: radio = 1; break; } mutex_lock(&dev->lock); em28xx_videodbg("open dev=%s type=%s users=%d\n", video_device_node_name(vdev), v4l2_type_names[fh_type], dev->users); fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL); if 
(!fh) { em28xx_errdev("em28xx-video.c: Out of memory?!\n"); mutex_unlock(&dev->lock); return -ENOMEM; } fh->dev = dev; fh->radio = radio; fh->type = fh_type; filp->private_data = fh; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) { em28xx_set_mode(dev, EM28XX_ANALOG_MODE); em28xx_set_alternate(dev); em28xx_resolution_set(dev); /* Needed, since GPIO might have disabled power of some i2c device */ em28xx_wake_i2c(dev); } if (fh->radio) { em28xx_videodbg("video_open: setting radio device\n"); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio); } dev->users++; if (dev->progressive) field = V4L2_FIELD_NONE; else field = V4L2_FIELD_INTERLACED; videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops, NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, field, sizeof(struct em28xx_buffer), fh); videobuf_queue_vmalloc_init(&fh->vb_vbiq, &em28xx_vbi_qops, NULL, &dev->slock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct em28xx_buffer), fh); mutex_unlock(&dev->lock); return errCode; } /* * em28xx_realease_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconected or at module unload */ void em28xx_release_analog_resources(struct em28xx *dev) { /*FIXME: I2C IR should be disconnected */ if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; } if (dev->vbi_dev) { em28xx_info("V4L2 device %s deregistered\n", video_device_node_name(dev->vbi_dev)); if (video_is_registered(dev->vbi_dev)) video_unregister_device(dev->vbi_dev); else video_device_release(dev->vbi_dev); dev->vbi_dev = NULL; } if (dev->vdev) { em28xx_info("V4L2 device %s deregistered\n", video_device_node_name(dev->vdev)); if (video_is_registered(dev->vdev)) video_unregister_device(dev->vdev); else video_device_release(dev->vdev); dev->vdev = NULL; } } /* * em28xx_v4l2_close() * stops streaming and deallocates all resources allocated by 
the v4l2 * calls and ioctls */ static int em28xx_v4l2_close(struct file *filp) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int errCode; em28xx_videodbg("users=%d\n", dev->users); if (res_check(fh, EM28XX_RESOURCE_VIDEO)) { videobuf_stop(&fh->vb_vidq); res_free(fh, EM28XX_RESOURCE_VIDEO); } if (res_check(fh, EM28XX_RESOURCE_VBI)) { videobuf_stop(&fh->vb_vbiq); res_free(fh, EM28XX_RESOURCE_VBI); } if (dev->users == 1) { /* the device is already disconnect, free the remaining resources */ if (dev->state & DEV_DISCONNECTED) { em28xx_release_resources(dev); kfree(dev); return 0; } /* Save some power by putting tuner to sleep */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0); /* do this before setting alternate! */ em28xx_uninit_isoc(dev); em28xx_set_mode(dev, EM28XX_SUSPEND); /* set alternate 0 */ dev->alt = 0; em28xx_videodbg("setting alternate 0\n"); errCode = usb_set_interface(dev->udev, 0, 0); if (errCode < 0) { em28xx_errdev("cannot change alternate number to " "0 (error=%i)\n", errCode); } } videobuf_mmap_free(&fh->vb_vidq); videobuf_mmap_free(&fh->vb_vbiq); kfree(fh); dev->users--; wake_up_interruptible_nr(&dev->open, 1); return 0; } /* * em28xx_v4l2_read() * will allocate buffers when called for the first time */ static ssize_t em28xx_v4l2_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; /* FIXME: read() is not prepared to allow changing the video resolution while streaming. 
Seems a bug at em28xx_set_fmt */ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (res_locked(dev, EM28XX_RESOURCE_VIDEO)) return -EBUSY; return videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0, filp->f_flags & O_NONBLOCK); } if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { if (!res_get(fh, EM28XX_RESOURCE_VBI)) return -EBUSY; return videobuf_read_stream(&fh->vb_vbiq, buf, count, pos, 0, filp->f_flags & O_NONBLOCK); } return 0; } /* * em28xx_v4l2_poll() * will allocate buffers when called for the first time */ static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table *wait) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (!res_get(fh, EM28XX_RESOURCE_VIDEO)) return POLLERR; return videobuf_poll_stream(filp, &fh->vb_vidq, wait); } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { if (!res_get(fh, EM28XX_RESOURCE_VBI)) return POLLERR; return videobuf_poll_stream(filp, &fh->vb_vbiq, wait); } else { return POLLERR; } } /* * em28xx_v4l2_mmap() */ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) rc = videobuf_mmap_mapper(&fh->vb_vidq, vma); else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) rc = videobuf_mmap_mapper(&fh->vb_vbiq, vma); em28xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n", (unsigned long)vma->vm_start, (unsigned long)vma->vm_end-(unsigned long)vma->vm_start, rc); return rc; } static const struct v4l2_file_operations em28xx_v4l_fops = { .owner = THIS_MODULE, .open = em28xx_v4l2_open, .release = em28xx_v4l2_close, .read = em28xx_v4l2_read, .poll = em28xx_v4l2_poll, .mmap = em28xx_v4l2_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, 
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_cropcap = vidioc_cropcap, .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, .vidioc_g_chip_ident = vidioc_g_chip_ident, #endif #ifdef CONFIG_VIDEO_V4L1_COMPAT .vidiocgmbuf = vidiocgmbuf, #endif }; static const struct video_device em28xx_video_template = { .fops = &em28xx_v4l_fops, .release = video_device_release, .ioctl_ops = &video_ioctl_ops, .tvnorms = V4L2_STD_ALL, .current_norm = V4L2_STD_PAL, }; static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = em28xx_v4l2_open, .release = em28xx_v4l2_close, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = radio_querycap, .vidioc_g_tuner = radio_g_tuner, 
.vidioc_enum_input = radio_enum_input, .vidioc_g_audio = radio_g_audio, .vidioc_s_tuner = radio_s_tuner, .vidioc_s_audio = radio_s_audio, .vidioc_s_input = radio_s_input, .vidioc_queryctrl = radio_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static struct video_device em28xx_radio_template = { .name = "em28xx-radio", .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; /******************************** usb interface ******************************/ static struct video_device *em28xx_vdev_init(struct em28xx *dev, const struct video_device *template, const char *type_name) { struct video_device *vfd; vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; vfd->v4l2_dev = &dev->v4l2_dev; vfd->release = video_device_release; vfd->debug = video_debug; snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name); video_set_drvdata(vfd, dev); return vfd; } int em28xx_register_analog_devices(struct em28xx *dev) { u8 val; int ret; printk(KERN_INFO "%s: v4l2 driver version %d.%d.%d\n", dev->name, (EM28XX_VERSION_CODE >> 16) & 0xff, (EM28XX_VERSION_CODE >> 8) & 0xff, EM28XX_VERSION_CODE & 0xff); /* set default norm */ dev->norm = em28xx_video_template.current_norm; dev->interlaced = EM28XX_INTERLACED_DEFAULT; dev->ctl_input = 0; /* Analog specific initialization */ dev->format = &format[0]; em28xx_set_video_format(dev, format[0].fourcc, norm_maxw(dev), norm_maxh(dev)); video_mux(dev, dev->ctl_input); /* Audio defaults */ dev->mute = 1; dev->volume = 0x1f; /* em28xx_write_reg(dev, EM28XX_R0E_AUDIOSRC, 0xc0); audio register */ val = (u8)em28xx_read_reg(dev, EM28XX_R0F_XCLK); em28xx_write_reg(dev, EM28XX_R0F_XCLK, (EM28XX_XCLK_AUDIO_UNMUTE | val)); em28xx_set_outfmt(dev); em28xx_colorlevels_set_default(dev); 
em28xx_compression_disable(dev); /* allocate and fill video video_device struct */ dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video"); if (!dev->vdev) { em28xx_errdev("cannot allocate video_device.\n"); return -ENODEV; } /* register v4l2 video video_device */ ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER, video_nr[dev->devno]); if (ret) { em28xx_errdev("unable to register video device (error=%i).\n", ret); return ret; } /* Allocate and fill vbi video_device struct */ if (em28xx_vbi_supported(dev) == 1) { dev->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template, "vbi"); /* register v4l2 vbi video_device */ ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, vbi_nr[dev->devno]); if (ret < 0) { em28xx_errdev("unable to register vbi device\n"); return ret; } } if (em28xx_boards[dev->model].radio.type == EM28XX_RADIO) { dev->radio_dev = em28xx_vdev_init(dev, &em28xx_radio_template, "radio"); if (!dev->radio_dev) { em28xx_errdev("cannot allocate video_device.\n"); return -ENODEV; } ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO, radio_nr[dev->devno]); if (ret < 0) { em28xx_errdev("can't register radio device\n"); return ret; } em28xx_info("Registered radio device as %s\n", video_device_node_name(dev->radio_dev)); } em28xx_info("V4L2 video device registered as %s\n", video_device_node_name(dev->vdev)); if (dev->vbi_dev) em28xx_info("V4L2 VBI device registered as %s\n", video_device_node_name(dev->vbi_dev)); return 0; }
gpl-2.0
NieNs/IM-A750K
fs/nilfs2/inode.c
760
22057
/* * inode.c - NILFS inode operations. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Ryusuke Konishi <ryusuke@osrg.net> * */ #include <linux/buffer_head.h> #include <linux/gfp.h> #include <linux/mpage.h> #include <linux/writeback.h> #include <linux/uio.h> #include "nilfs.h" #include "segment.h" #include "page.h" #include "mdt.h" #include "cpfile.h" #include "ifile.h" /** * nilfs_get_block() - get a file block on the filesystem (callback function) * @inode - inode struct of the target file * @blkoff - file block number * @bh_result - buffer head to be mapped on * @create - indicate whether allocating the block or not when it has not * been allocated yet. * * This function does not issue actual read request of the specified data * block. It is done by VFS. 
*/ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct buffer_head *bh_result, int create) { struct nilfs_inode_info *ii = NILFS_I(inode); __u64 blknum = 0; int err = 0, ret; struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode)); unsigned maxblocks = bh_result->b_size >> inode->i_blkbits; down_read(&NILFS_MDT(dat)->mi_sem); ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks); up_read(&NILFS_MDT(dat)->mi_sem); if (ret >= 0) { /* found */ map_bh(bh_result, inode->i_sb, blknum); if (ret > 0) bh_result->b_size = (ret << inode->i_blkbits); goto out; } /* data block was not found */ if (ret == -ENOENT && create) { struct nilfs_transaction_info ti; bh_result->b_blocknr = 0; err = nilfs_transaction_begin(inode->i_sb, &ti, 1); if (unlikely(err)) goto out; err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff, (unsigned long)bh_result); if (unlikely(err != 0)) { if (err == -EEXIST) { /* * The get_block() function could be called * from multiple callers for an inode. * However, the page having this block must * be locked in this case. */ printk(KERN_WARNING "nilfs_get_block: a race condition " "while inserting a data block. " "(inode number=%lu, file block " "offset=%llu)\n", inode->i_ino, (unsigned long long)blkoff); err = 0; } else if (err == -EINVAL) { nilfs_error(inode->i_sb, __func__, "broken bmap (inode=%lu)\n", inode->i_ino); err = -EIO; } nilfs_transaction_abort(inode->i_sb); goto out; } nilfs_mark_inode_dirty(inode); nilfs_transaction_commit(inode->i_sb); /* never fails */ /* Error handling should be detailed */ set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed to proper value */ } else if (ret == -ENOENT) { /* not found is not error (e.g. hole); must return without the mapped state flag. */ ; } else { err = ret; } out: return err; } /** * nilfs_readpage() - implement readpage() method of nilfs_aops {} * address_space_operations. 
* @file - file struct of the file to be read * @page - the page to be read */ static int nilfs_readpage(struct file *file, struct page *page) { return mpage_readpage(page, nilfs_get_block); } /** * nilfs_readpages() - implement readpages() method of nilfs_aops {} * address_space_operations. * @file - file struct of the file to be read * @mapping - address_space struct used for reading multiple pages * @pages - the pages to be read * @nr_pages - number of pages to be read */ static int nilfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); } static int nilfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; int err = 0; if (wbc->sync_mode == WB_SYNC_ALL) err = nilfs_construct_dsync_segment(inode->i_sb, inode, wbc->range_start, wbc->range_end); return err; } static int nilfs_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; int err; redirty_page_for_writepage(wbc, page); unlock_page(page); if (wbc->sync_mode == WB_SYNC_ALL) { err = nilfs_construct_segment(inode->i_sb); if (unlikely(err)) return err; } else if (wbc->for_reclaim) nilfs_flush_segment(inode->i_sb, inode->i_ino); return 0; } static int nilfs_set_page_dirty(struct page *page) { int ret = __set_page_dirty_buffers(page); if (ret) { struct inode *inode = page->mapping->host; struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); nilfs_set_file_dirty(sbi, inode, nr_dirty); } return ret; } static int nilfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; int err = nilfs_transaction_begin(inode->i_sb, NULL, 1); if (unlikely(err)) return err; *pagep = NULL; err = block_write_begin(file, mapping, 
pos, len, flags, pagep, fsdata, nilfs_get_block); if (unlikely(err)) nilfs_transaction_abort(inode->i_sb); return err; } static int nilfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; unsigned start = pos & (PAGE_CACHE_SIZE - 1); unsigned nr_dirty; int err; nr_dirty = nilfs_page_count_clean_buffers(page, start, start + copied); copied = generic_write_end(file, mapping, pos, len, copied, page, fsdata); nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty); err = nilfs_transaction_commit(inode->i_sb); return err ? : copied; } static ssize_t nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; ssize_t size; if (rw == WRITE) return 0; /* Needs synchronization with the cleaner */ size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, nilfs_get_block, NULL); return size; } const struct address_space_operations nilfs_aops = { .writepage = nilfs_writepage, .readpage = nilfs_readpage, .sync_page = block_sync_page, .writepages = nilfs_writepages, .set_page_dirty = nilfs_set_page_dirty, .readpages = nilfs_readpages, .write_begin = nilfs_write_begin, .write_end = nilfs_write_end, /* .releasepage = nilfs_releasepage, */ .invalidatepage = block_invalidatepage, .direct_IO = nilfs_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, }; struct inode *nilfs_new_inode(struct inode *dir, int mode) { struct super_block *sb = dir->i_sb; struct nilfs_sb_info *sbi = NILFS_SB(sb); struct inode *inode; struct nilfs_inode_info *ii; int err = -ENOMEM; ino_t ino; inode = new_inode(sb); if (unlikely(!inode)) goto failed; mapping_set_gfp_mask(inode->i_mapping, mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); ii = NILFS_I(inode); ii->i_state = 1 << NILFS_I_NEW; err = 
nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh); if (unlikely(err)) goto failed_ifile_create_inode; /* reference count of i_bh inherits from nilfs_mdt_read_block() */ atomic_inc(&sbi->s_inodes_count); inode_init_owner(inode, dir, mode); inode->i_ino = ino; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { err = nilfs_bmap_read(ii->i_bmap, NULL); if (err < 0) goto failed_bmap; set_bit(NILFS_I_BMAP, &ii->i_state); /* No lock is needed; iget() ensures it. */ } ii->i_flags = NILFS_I(dir)->i_flags; if (S_ISLNK(mode)) ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL); if (!S_ISDIR(mode)) ii->i_flags &= ~NILFS_DIRSYNC_FL; /* ii->i_file_acl = 0; */ /* ii->i_dir_acl = 0; */ ii->i_dir_start_lookup = 0; ii->i_cno = 0; nilfs_set_inode_flags(inode); spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); insert_inode_hash(inode); err = nilfs_init_acl(inode, dir); if (unlikely(err)) goto failed_acl; /* never occur. When supporting nilfs_init_acl(), proper cancellation of above jobs should be considered */ return inode; failed_acl: failed_bmap: inode->i_nlink = 0; iput(inode); /* raw_inode will be deleted through generic_delete_inode() */ goto failed; failed_ifile_create_inode: make_bad_inode(inode); iput(inode); /* if i_nlink == 1, generic_forget_inode() will be called */ failed: return ERR_PTR(err); } void nilfs_free_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct nilfs_sb_info *sbi = NILFS_SB(sb); clear_inode(inode); /* XXX: check error code? Is there any thing I can do? 
*/ (void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino); atomic_dec(&sbi->s_inodes_count); } void nilfs_set_inode_flags(struct inode *inode) { unsigned int flags = NILFS_I(inode)->i_flags; inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC); if (flags & NILFS_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & NILFS_APPEND_FL) inode->i_flags |= S_APPEND; if (flags & NILFS_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; #ifndef NILFS_ATIME_DISABLE if (flags & NILFS_NOATIME_FL) #endif inode->i_flags |= S_NOATIME; if (flags & NILFS_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; mapping_set_gfp_mask(inode->i_mapping, mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); } int nilfs_read_inode_common(struct inode *inode, struct nilfs_inode *raw_inode) { struct nilfs_inode_info *ii = NILFS_I(inode); int err; inode->i_mode = le16_to_cpu(raw_inode->i_mode); inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid); inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid); inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); inode->i_size = le64_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime); inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime); inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); if (inode->i_nlink == 0 && inode->i_mode == 0) return -EINVAL; /* this inode is deleted */ inode->i_blocks = le64_to_cpu(raw_inode->i_blocks); ii->i_flags = le32_to_cpu(raw_inode->i_flags); #if 0 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); ii->i_dir_acl = S_ISREG(inode->i_mode) ? 
0 : le32_to_cpu(raw_inode->i_dir_acl); #endif ii->i_dir_start_lookup = 0; ii->i_cno = 0; inode->i_generation = le32_to_cpu(raw_inode->i_generation); if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { err = nilfs_bmap_read(ii->i_bmap, raw_inode); if (err < 0) return err; set_bit(NILFS_I_BMAP, &ii->i_state); /* No lock is needed; iget() ensures it. */ } return 0; } static int __nilfs_read_inode(struct super_block *sb, unsigned long ino, struct inode *inode) { struct nilfs_sb_info *sbi = NILFS_SB(sb); struct inode *dat = nilfs_dat_inode(sbi->s_nilfs); struct buffer_head *bh; struct nilfs_inode *raw_inode; int err; down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh); if (unlikely(err)) goto bad_inode; raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh); err = nilfs_read_inode_common(inode, raw_inode); if (err) goto failed_unmap; if (S_ISREG(inode->i_mode)) { inode->i_op = &nilfs_file_inode_operations; inode->i_fop = &nilfs_file_operations; inode->i_mapping->a_ops = &nilfs_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &nilfs_dir_inode_operations; inode->i_fop = &nilfs_dir_operations; inode->i_mapping->a_ops = &nilfs_aops; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &nilfs_symlink_inode_operations; inode->i_mapping->a_ops = &nilfs_aops; } else { inode->i_op = &nilfs_special_inode_operations; init_special_inode( inode, inode->i_mode, huge_decode_dev(le64_to_cpu(raw_inode->i_device_code))); } nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh); brelse(bh); up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ nilfs_set_inode_flags(inode); return 0; failed_unmap: nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh); brelse(bh); bad_inode: up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ return err; } struct inode *nilfs_iget(struct super_block *sb, unsigned long ino) { struct inode *inode; int err; inode = iget_locked(sb, ino); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); if (!(inode->i_state 
& I_NEW)) return inode; err = __nilfs_read_inode(sb, ino, inode); if (unlikely(err)) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } void nilfs_write_inode_common(struct inode *inode, struct nilfs_inode *raw_inode, int has_bmap) { struct nilfs_inode_info *ii = NILFS_I(inode); raw_inode->i_mode = cpu_to_le16(inode->i_mode); raw_inode->i_uid = cpu_to_le32(inode->i_uid); raw_inode->i_gid = cpu_to_le32(inode->i_gid); raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le64(inode->i_size); raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); raw_inode->i_blocks = cpu_to_le64(inode->i_blocks); raw_inode->i_flags = cpu_to_le32(ii->i_flags); raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (has_bmap) nilfs_bmap_write(ii->i_bmap, raw_inode); else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_device_code = cpu_to_le64(huge_encode_dev(inode->i_rdev)); /* When extending inode, nilfs->ns_inode_size should be checked for substitutions of appended fields */ } void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh) { ino_t ino = inode->i_ino; struct nilfs_inode_info *ii = NILFS_I(inode); struct super_block *sb = inode->i_sb; struct nilfs_sb_info *sbi = NILFS_SB(sb); struct nilfs_inode *raw_inode; raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh); if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state)) memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size); set_bit(NILFS_I_INODE_DIRTY, &ii->i_state); nilfs_write_inode_common(inode, raw_inode, 0); /* XXX: call with has_bmap = 0 is a workaround to avoid deadlock of bmap. 
This delays update of i_bmap to just before writing */ nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh); } #define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */ static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, unsigned long from) { unsigned long b; int ret; if (!test_bit(NILFS_I_BMAP, &ii->i_state)) return; repeat: ret = nilfs_bmap_last_key(ii->i_bmap, &b); if (ret == -ENOENT) return; else if (ret < 0) goto failed; if (b < from) return; b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from); ret = nilfs_bmap_truncate(ii->i_bmap, b); nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb); if (!ret || (ret == -ENOMEM && nilfs_bmap_truncate(ii->i_bmap, b) == 0)) goto repeat; failed: if (ret == -EINVAL) nilfs_error(ii->vfs_inode.i_sb, __func__, "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino); else nilfs_warning(ii->vfs_inode.i_sb, __func__, "failed to truncate bmap (ino=%lu, err=%d)", ii->vfs_inode.i_ino, ret); } void nilfs_truncate(struct inode *inode) { unsigned long blkoff; unsigned int blocksize; struct nilfs_transaction_info ti; struct super_block *sb = inode->i_sb; struct nilfs_inode_info *ii = NILFS_I(inode); if (!test_bit(NILFS_I_BMAP, &ii->i_state)) return; if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return; blocksize = sb->s_blocksize; blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits; nilfs_transaction_begin(sb, &ti, 0); /* never fails */ block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block); nilfs_truncate_bmap(ii, blkoff); inode->i_mtime = inode->i_ctime = CURRENT_TIME; if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_mark_inode_dirty(inode); nilfs_set_file_dirty(NILFS_SB(sb), inode, 0); nilfs_transaction_commit(sb); /* May construct a logical segment and may fail in sync mode. But truncate has no return value. 
*/ } void nilfs_delete_inode(struct inode *inode) { struct nilfs_transaction_info ti; struct super_block *sb = inode->i_sb; struct nilfs_inode_info *ii = NILFS_I(inode); if (unlikely(is_bad_inode(inode))) { if (inode->i_data.nrpages) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); return; } nilfs_transaction_begin(sb, &ti, 0); /* never fails */ if (inode->i_data.nrpages) truncate_inode_pages(&inode->i_data, 0); nilfs_truncate_bmap(ii, 0); nilfs_mark_inode_dirty(inode); nilfs_free_inode(inode); /* nilfs_free_inode() marks inode buffer dirty */ if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_transaction_commit(sb); /* May construct a logical segment and may fail in sync mode. But delete_inode has no return value. */ } int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) { struct nilfs_transaction_info ti; struct inode *inode = dentry->d_inode; struct super_block *sb = inode->i_sb; int err; err = inode_change_ok(inode, iattr); if (err) return err; err = nilfs_transaction_begin(sb, &ti, 0); if (unlikely(err)) return err; err = inode_setattr(inode, iattr); if (!err && (iattr->ia_valid & ATTR_MODE)) err = nilfs_acl_chmod(inode); if (likely(!err)) err = nilfs_transaction_commit(sb); else nilfs_transaction_abort(sb); return err; } int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode, struct buffer_head **pbh) { struct nilfs_inode_info *ii = NILFS_I(inode); int err; spin_lock(&sbi->s_inode_lock); if (ii->i_bh == NULL) { spin_unlock(&sbi->s_inode_lock); err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino, pbh); if (unlikely(err)) return err; spin_lock(&sbi->s_inode_lock); if (ii->i_bh == NULL) ii->i_bh = *pbh; else { brelse(*pbh); *pbh = ii->i_bh; } } else *pbh = ii->i_bh; get_bh(*pbh); spin_unlock(&sbi->s_inode_lock); return 0; } int nilfs_inode_dirty(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); int ret = 0; if 
(!list_empty(&ii->i_dirty)) { spin_lock(&sbi->s_inode_lock); ret = test_bit(NILFS_I_DIRTY, &ii->i_state) || test_bit(NILFS_I_BUSY, &ii->i_state); spin_unlock(&sbi->s_inode_lock); } return ret; } int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode, unsigned nr_dirty) { struct nilfs_inode_info *ii = NILFS_I(inode); atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks); if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state)) return 0; spin_lock(&sbi->s_inode_lock); if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && !test_bit(NILFS_I_BUSY, &ii->i_state)) { /* Because this routine may race with nilfs_dispose_list(), we have to check NILFS_I_QUEUED here, too. */ if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) { /* This will happen when somebody is freeing this inode. */ nilfs_warning(sbi->s_super, __func__, "cannot get inode (ino=%lu)\n", inode->i_ino); spin_unlock(&sbi->s_inode_lock); return -EINVAL; /* NILFS_I_DIRTY may remain for freeing inode */ } list_del(&ii->i_dirty); list_add_tail(&ii->i_dirty, &sbi->s_dirty_files); set_bit(NILFS_I_QUEUED, &ii->i_state); } spin_unlock(&sbi->s_inode_lock); return 0; } int nilfs_mark_inode_dirty(struct inode *inode) { struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); struct buffer_head *ibh; int err; err = nilfs_load_inode_block(sbi, inode, &ibh); if (unlikely(err)) { nilfs_warning(inode->i_sb, __func__, "failed to reget inode block.\n"); return err; } nilfs_update_inode(inode, ibh); nilfs_mdt_mark_buffer_dirty(ibh); nilfs_mdt_mark_dirty(sbi->s_ifile); brelse(ibh); return 0; } /** * nilfs_dirty_inode - reflect changes on given inode to an inode block. * @inode: inode of the file to be registered. * * nilfs_dirty_inode() loads a inode block containing the specified * @inode and copies data from a nilfs_inode to a corresponding inode * entry in the inode block. This operation is excluded from the segment * construction. 
This function can be called both as a single operation * and as a part of indivisible file operations. */ void nilfs_dirty_inode(struct inode *inode) { struct nilfs_transaction_info ti; if (is_bad_inode(inode)) { nilfs_warning(inode->i_sb, __func__, "tried to mark bad_inode dirty. ignored.\n"); dump_stack(); return; } nilfs_transaction_begin(inode->i_sb, &ti, 0); nilfs_mark_inode_dirty(inode); nilfs_transaction_commit(inode->i_sb); /* never fails */ }
gpl-2.0
arjen75/ics-lge-kernel-msm7x27-chick
drivers/net/ixgbe/ixgbe_dcb_nl.c
760
18074
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "ixgbe.h" #include <linux/dcbnl.h> #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 #define BIT_PFC 0x02 #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 #define BIT_RESETLINK 0x40 #define BIT_LINKSPEED 0x80 /* Responses for the DCB_C_SET_ALL command */ #define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) { struct tc_configuration *src_tc_cfg = NULL; struct tc_configuration *dst_tc_cfg = NULL; int i; if (!src_dcb_cfg || !dst_dcb_cfg) return -EINVAL; for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = src_tc_cfg->path[DCB_TX_CONFIG].prio_type; dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = src_tc_cfg->path[DCB_RX_CONFIG].prio_type; dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; } for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] [i-DCB_PG_ATTR_BW_ID_0] = 
src_dcb_cfg->bw_percentage [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; } for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; } dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; return 0; } static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); } static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { u8 err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); if (state > 0) { /* Turn on DCB */ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) goto out; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); err = 1; goto out; } if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); if (adapter->hw.mac.type == ixgbe_mac_82598EB) { adapter->last_lfc_mode = adapter->hw.fc.current_mode; adapter->hw.fc.requested_mode = ixgbe_fc_none; } adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; if (adapter->hw.mac.type == ixgbe_mac_82599EB) { adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; } adapter->flags |= IXGBE_FLAG_DCB_ENABLED; ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); } else { /* Turn off DCB */ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; if (adapter->hw.mac.type == 
ixgbe_mac_82599EB) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); } } out: return err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i, j; memset(perm_addr, 0xff, MAX_ADDR_LEN); for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; if (adapter->hw.mac.type == ixgbe_mac_82599EB) { for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; } } static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = up_map; if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { adapter->dcb_set_bitmap |= BIT_PG_TX; adapter->dcb_set_bitmap |= BIT_RESETLINK; } } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 
adapter->dcb_cfg.bw_percentage[0][bwg_id]) { adapter->dcb_set_bitmap |= BIT_PG_TX; adapter->dcb_set_bitmap |= BIT_RESETLINK; } } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = up_map; if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { adapter->dcb_set_bitmap |= BIT_PG_RX; adapter->dcb_set_bitmap |= BIT_RESETLINK; } } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != adapter->dcb_cfg.bw_percentage[1][bwg_id]) { adapter->dcb_set_bitmap |= BIT_PG_RX; adapter->dcb_set_bitmap |= BIT_RESETLINK; } } static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = 
adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; } static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, u8 setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != adapter->dcb_cfg.tc_config[priority].dcb_pfc) { adapter->dcb_set_bitmap |= BIT_PFC; adapter->temp_dcb_cfg.pfc_mode_enable = true; } } static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, u8 *setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret; if (!adapter->dcb_set_bitmap) return DCB_NO_HW_CHG; ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, adapter->ring_feature[RING_F_DCB].indices); if (ret) return DCB_NO_HW_CHG; /* * Only take down the adapter if the configuration change * requires a reset. 
*/ if (adapter->dcb_set_bitmap & BIT_RESETLINK) { while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); } else { if (netif_running(netdev)) ixgbe_down(adapter); } } if (adapter->dcb_cfg.pfc_mode_enable) { if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) adapter->last_lfc_mode = adapter->hw.fc.current_mode; adapter->hw.fc.requested_mode = ixgbe_fc_pfc; } else { if (adapter->hw.mac.type != ixgbe_mac_82598EB) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; else adapter->hw.fc.requested_mode = ixgbe_fc_none; } if (adapter->dcb_set_bitmap & BIT_RESETLINK) { if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); } else { if (netif_running(netdev)) ixgbe_up(adapter); } ret = DCB_HW_CHG_RST; } else if (adapter->dcb_set_bitmap & BIT_PFC) { if (adapter->hw.mac.type == ixgbe_mac_82598EB) ixgbe_dcb_config_pfc_82598(&adapter->hw, &adapter->dcb_cfg); else if (adapter->hw.mac.type == ixgbe_mac_82599EB) ixgbe_dcb_config_pfc_82599(&adapter->hw, &adapter->dcb_cfg); ret = DCB_HW_CHG; } if (adapter->dcb_cfg.pfc_mode_enable) adapter->hw.fc.current_mode = ixgbe_fc_pfc; if (adapter->dcb_set_bitmap & BIT_RESETLINK) clear_bit(__IXGBE_RESETTING, &adapter->state); adapter->dcb_set_bitmap = 0x00; return ret; } static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (capid) { case DCB_CAP_ATTR_PG: *cap = true; break; case DCB_CAP_ATTR_PFC: *cap = true; break; case DCB_CAP_ATTR_UP2TC: *cap = false; break; case DCB_CAP_ATTR_PG_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_GSP: *cap = true; break; 
case DCB_CAP_ATTR_BCN: *cap = false; break; default: rval = -EINVAL; break; } } else { rval = -EINVAL; } return rval; } static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { case DCB_NUMTCS_ATTR_PG: *num = MAX_TRAFFIC_CLASS; break; case DCB_NUMTCS_ATTR_PFC: *num = MAX_TRAFFIC_CLASS; break; default: rval = -EINVAL; break; } } else { rval = -EINVAL; } return rval; } static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { return -EINVAL; } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return adapter->dcb_cfg.pfc_mode_enable; } static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; if (adapter->temp_dcb_cfg.pfc_mode_enable != adapter->dcb_cfg.pfc_mode_enable) adapter->dcb_set_bitmap |= BIT_PFC; } /** * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority * @netdev : the corresponding netdev * @idtype : identifies the id as ether type or TCP/UDP port number * @id: id is either ether type or TCP/UDP port number * * Returns : on success, returns a non-zero 802.1p user priority bitmap * otherwise returns 0 as the invalid user priority bitmap to indicate an * error. 
*/ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { u8 rval = 0; switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE if (id == ETH_P_FCOE) rval = ixgbe_fcoe_getapp(netdev_priv(netdev)); #endif break; case DCB_APP_IDTYPE_PORTNUM: break; default: break; } return rval; } /** * ixgbe_dcbnl_setapp - set the DCBX application user priority * @netdev : the corresponding netdev * @idtype : identifies the id as ether type or TCP/UDP port number * @id: id is either ether type or TCP/UDP port number * @up: the 802.1p user priority bitmap * * Returns : 0 on success or 1 on error */ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 id, u8 up) { u8 rval = 1; switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE if (id == ETH_P_FCOE) { u8 tc; struct ixgbe_adapter *adapter; adapter = netdev_priv(netdev); tc = adapter->fcoe.tc; rval = ixgbe_fcoe_setapp(adapter, up); if ((!rval) && (tc != adapter->fcoe.tc) && (adapter->flags & IXGBE_FLAG_DCB_ENABLED) && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { adapter->dcb_set_bitmap |= BIT_APP_UPCHG; adapter->dcb_set_bitmap |= BIT_RESETLINK; } } #endif break; case DCB_APP_IDTYPE_PORTNUM: break; default: break; } return rval; } const struct dcbnl_rtnl_ops dcbnl_ops = { .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, .setall = ixgbe_dcbnl_set_all, .getcap = ixgbe_dcbnl_getcap, .getnumtcs = ixgbe_dcbnl_getnumtcs, .setnumtcs = ixgbe_dcbnl_setnumtcs, .getpfcstate = 
ixgbe_dcbnl_getpfcstate, .setpfcstate = ixgbe_dcbnl_setpfcstate, .getapp = ixgbe_dcbnl_getapp, .setapp = ixgbe_dcbnl_setapp, };
gpl-2.0
Chaosz-X/flyer_7x30_xkics_kernel
drivers/sbus/char/display7seg.c
760
6579
/* display7seg.c - Driver implementation for the 7-segment display * present on Sun Microsystems CP1400 and CP1500 * * Copyright (c) 2000 Eric Brower (ebrower@usa.net) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/ioport.h> /* request_region */ #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/atomic.h> #include <asm/uaccess.h> /* put_/get_user */ #include <asm/io.h> #include <asm/display7seg.h> #define D7S_MINOR 193 #define DRIVER_NAME "d7s" #define PFX DRIVER_NAME ": " static int sol_compat = 0; /* Solaris compatibility mode */ /* Solaris compatibility flag - * The Solaris implementation omits support for several * documented driver features (ref Sun doc 806-0180-03). * By default, this module supports the documented driver * abilities, rather than the Solaris implementation: * * 1) Device ALWAYS reverts to OBP-specified FLIPPED mode * upon closure of device or module unload. * 2) Device ioctls D7SIOCRD/D7SIOCWR honor toggling of * FLIP bit * * If you wish the device to operate as under Solaris, * omitting above features, set this parameter to non-zero. 
*/ module_param(sol_compat, int, 0); MODULE_PARM_DESC(sol_compat, "Disables documented functionality omitted from Solaris driver"); MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("7-Segment Display driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("d7s"); struct d7s { void __iomem *regs; bool flipped; }; struct d7s *d7s_device; /* * Register block address- see header for details * ----------------------------------------- * | DP | ALARM | FLIP | 4 | 3 | 2 | 1 | 0 | * ----------------------------------------- * * DP - Toggles decimal point on/off * ALARM - Toggles "Alarm" LED green/red * FLIP - Inverts display for upside-down mounted board * bits 0-4 - 7-segment display contents */ static atomic_t d7s_users = ATOMIC_INIT(0); static int d7s_open(struct inode *inode, struct file *f) { if (D7S_MINOR != iminor(inode)) return -ENODEV; cycle_kernel_lock(); atomic_inc(&d7s_users); return 0; } static int d7s_release(struct inode *inode, struct file *f) { /* Reset flipped state to OBP default only if * no other users have the device open and we * are not operating in solaris-compat mode */ if (atomic_dec_and_test(&d7s_users) && !sol_compat) { struct d7s *p = d7s_device; u8 regval = 0; regval = readb(p->regs); if (p->flipped) regval |= D7S_FLIP; else regval &= ~D7S_FLIP; writeb(regval, p->regs); } return 0; } static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct d7s *p = d7s_device; u8 regs = readb(p->regs); int error = 0; u8 ireg = 0; if (D7S_MINOR != iminor(file->f_path.dentry->d_inode)) return -ENODEV; lock_kernel(); switch (cmd) { case D7SIOCWR: /* assign device register values we mask-out D7S_FLIP * if in sol_compat mode */ if (get_user(ireg, (int __user *) arg)) { error = -EFAULT; break; } if (sol_compat) { if (regs & D7S_FLIP) ireg |= D7S_FLIP; else ireg &= ~D7S_FLIP; } writeb(ireg, p->regs); break; case D7SIOCRD: /* retrieve device register values * NOTE: Solaris 
implementation returns D7S_FLIP bit * as toggled by user, even though it does not honor it. * This driver will not misinform you about the state * of your hardware while in sol_compat mode */ if (put_user(regs, (int __user *) arg)) { error = -EFAULT; break; } break; case D7SIOCTM: /* toggle device mode-- flip display orientation */ if (regs & D7S_FLIP) regs &= ~D7S_FLIP; else regs |= D7S_FLIP; writeb(regs, p->regs); break; }; unlock_kernel(); return error; } static const struct file_operations d7s_fops = { .owner = THIS_MODULE, .unlocked_ioctl = d7s_ioctl, .compat_ioctl = d7s_ioctl, .open = d7s_open, .release = d7s_release, }; static struct miscdevice d7s_miscdev = { .minor = D7S_MINOR, .name = DRIVER_NAME, .fops = &d7s_fops }; static int __devinit d7s_probe(struct of_device *op, const struct of_device_id *match) { struct device_node *opts; int err = -EINVAL; struct d7s *p; u8 regs; if (d7s_device) goto out; p = kzalloc(sizeof(*p), GFP_KERNEL); err = -ENOMEM; if (!p) goto out; p->regs = of_ioremap(&op->resource[0], 0, sizeof(u8), "d7s"); if (!p->regs) { printk(KERN_ERR PFX "Cannot map chip registers\n"); goto out_free; } err = misc_register(&d7s_miscdev); if (err) { printk(KERN_ERR PFX "Unable to acquire miscdevice minor %i\n", D7S_MINOR); goto out_iounmap; } /* OBP option "d7s-flipped?" is honored as default for the * device, and reset default when detached */ regs = readb(p->regs); opts = of_find_node_by_path("/options"); if (opts && of_get_property(opts, "d7s-flipped?", NULL)) p->flipped = true; if (p->flipped) regs |= D7S_FLIP; else regs &= ~D7S_FLIP; writeb(regs, p->regs); printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n", op->dev.of_node->full_name, (regs & D7S_FLIP) ? " (FLIPPED)" : "", op->resource[0].start, sol_compat ? 
"in sol_compat mode" : ""); dev_set_drvdata(&op->dev, p); d7s_device = p; err = 0; out: return err; out_iounmap: of_iounmap(&op->resource[0], p->regs, sizeof(u8)); out_free: kfree(p); goto out; } static int __devexit d7s_remove(struct of_device *op) { struct d7s *p = dev_get_drvdata(&op->dev); u8 regs = readb(p->regs); /* Honor OBP d7s-flipped? unless operating in solaris-compat mode */ if (sol_compat) { if (p->flipped) regs |= D7S_FLIP; else regs &= ~D7S_FLIP; writeb(regs, p->regs); } misc_deregister(&d7s_miscdev); of_iounmap(&op->resource[0], p->regs, sizeof(u8)); kfree(p); return 0; } static const struct of_device_id d7s_match[] = { { .name = "display7seg", }, {}, }; MODULE_DEVICE_TABLE(of, d7s_match); static struct of_platform_driver d7s_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = d7s_match, }, .probe = d7s_probe, .remove = __devexit_p(d7s_remove), }; static int __init d7s_init(void) { return of_register_driver(&d7s_driver, &of_bus_type); } static void __exit d7s_exit(void) { of_unregister_driver(&d7s_driver); } module_init(d7s_init); module_exit(d7s_exit);
gpl-2.0
mtmichaelson/LG_Spectrum_Kernel
drivers/media/video/ivtv/ivtv-driver.c
760
44511
/* ivtv driver initialization and card probing Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Main Driver file for the ivtv project: * Driver for the Conexant CX23415/CX23416 chip. * Author: Kevin Thayer (nufan_wfk at yahoo.com) * License: GPL * http://www.ivtvdriver.org * * ----- * MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com> * and Takeru KOMORIYA<komoriya@paken.org> * * AVerMedia M179 GPIO info by Chris Pinkham <cpinkham@bc2va.org> * using information provided by Jiun-Kuei Jung @ AVerMedia. * * Kurouto Sikou CX23416GYC-STVLP tested by K.Ohta <alpha292@bremen.or.jp> * using information from T.Adachi,Takeru KOMORIYA and others :-) * * Nagase TRANSGEAR 5000TV, Aopen VA2000MAX-STN6 and I/O data GV-MVP/RX * version by T.Adachi. 
Special thanks Mr.Suzuki */ #include "ivtv-driver.h" #include "ivtv-version.h" #include "ivtv-fileops.h" #include "ivtv-i2c.h" #include "ivtv-firmware.h" #include "ivtv-queue.h" #include "ivtv-udma.h" #include "ivtv-irq.h" #include "ivtv-mailbox.h" #include "ivtv-streams.h" #include "ivtv-ioctl.h" #include "ivtv-cards.h" #include "ivtv-vbi.h" #include "ivtv-routing.h" #include "ivtv-gpio.h" #include <media/tveeprom.h> #include <media/saa7115.h> #include <media/v4l2-chip-ident.h> #include "tuner-xc2028.h" /* If you have already X v4l cards, then set this to X. This way the device numbers stay matched. Example: you have a WinTV card without radio and a PVR-350 with. Normally this would give a video1 device together with a radio0 device for the PVR. By setting this to 1 you ensure that radio0 is now also radio1. */ int ivtv_first_minor; /* add your revision and whatnot here */ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = { {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); /* ivtv instance counter */ static atomic_t ivtv_instance = ATOMIC_INIT(0); /* Parameter declarations */ static int cardtype[IVTV_MAX_CARDS]; static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int radio[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int i2c_clock_period[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static unsigned int cardtype_c = 1; static unsigned int tuner_c = 1; static unsigned int radio_c = 1; static unsigned int i2c_clock_period_c = 1; static char pal[] = "---"; static char secam[] = 
"--"; static char ntsc[] = "-"; /* Buffers */ /* DMA Buffers, Default size in MB allocated */ #define IVTV_DEFAULT_ENC_MPG_BUFFERS 4 #define IVTV_DEFAULT_ENC_YUV_BUFFERS 2 #define IVTV_DEFAULT_ENC_VBI_BUFFERS 1 /* Exception: size in kB for this stream (MB is overkill) */ #define IVTV_DEFAULT_ENC_PCM_BUFFERS 320 #define IVTV_DEFAULT_DEC_MPG_BUFFERS 1 #define IVTV_DEFAULT_DEC_YUV_BUFFERS 1 /* Exception: size in kB for this stream (MB is way overkill) */ #define IVTV_DEFAULT_DEC_VBI_BUFFERS 64 static int enc_mpg_buffers = IVTV_DEFAULT_ENC_MPG_BUFFERS; static int enc_yuv_buffers = IVTV_DEFAULT_ENC_YUV_BUFFERS; static int enc_vbi_buffers = IVTV_DEFAULT_ENC_VBI_BUFFERS; static int enc_pcm_buffers = IVTV_DEFAULT_ENC_PCM_BUFFERS; static int dec_mpg_buffers = IVTV_DEFAULT_DEC_MPG_BUFFERS; static int dec_yuv_buffers = IVTV_DEFAULT_DEC_YUV_BUFFERS; static int dec_vbi_buffers = IVTV_DEFAULT_DEC_VBI_BUFFERS; static int ivtv_yuv_mode; static int ivtv_yuv_threshold = -1; static int ivtv_pci_latency = 1; int ivtv_debug; static int tunertype = -1; static int newi2c = -1; module_param_array(tuner, int, &tuner_c, 0644); module_param_array(radio, bool, &radio_c, 0644); module_param_array(cardtype, int, &cardtype_c, 0644); module_param_string(pal, pal, sizeof(pal), 0644); module_param_string(secam, secam, sizeof(secam), 0644); module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); module_param_named(debug,ivtv_debug, int, 0644); module_param(ivtv_pci_latency, int, 0644); module_param(ivtv_yuv_mode, int, 0644); module_param(ivtv_yuv_threshold, int, 0644); module_param(ivtv_first_minor, int, 0644); module_param(enc_mpg_buffers, int, 0644); module_param(enc_yuv_buffers, int, 0644); module_param(enc_vbi_buffers, int, 0644); module_param(enc_pcm_buffers, int, 0644); module_param(dec_mpg_buffers, int, 0644); module_param(dec_yuv_buffers, int, 0644); module_param(dec_vbi_buffers, int, 0644); module_param(tunertype, int, 0644); module_param(newi2c, int, 0644); 
module_param_array(i2c_clock_period, int, &i2c_clock_period_c, 0644); MODULE_PARM_DESC(tuner, "Tuner type selection,\n" "\t\t\tsee tuner.h for values"); MODULE_PARM_DESC(radio, "Enable or disable the radio. Use only if autodetection\n" "\t\t\tfails. 0 = disable, 1 = enable"); MODULE_PARM_DESC(cardtype, "Only use this option if your card is not detected properly.\n" "\t\tSpecify card type:\n" "\t\t\t 1 = WinTV PVR 250\n" "\t\t\t 2 = WinTV PVR 350\n" "\t\t\t 3 = WinTV PVR-150 or PVR-500\n" "\t\t\t 4 = AVerMedia M179\n" "\t\t\t 5 = YUAN MPG600/Kuroutoshikou iTVC16-STVLP\n" "\t\t\t 6 = YUAN MPG160/Kuroutoshikou iTVC15-STVLP\n" "\t\t\t 7 = YUAN PG600/DIAMONDMM PVR-550 (CX Falcon 2)\n" "\t\t\t 8 = Adaptec AVC-2410\n" "\t\t\t 9 = Adaptec AVC-2010\n" "\t\t\t10 = NAGASE TRANSGEAR 5000TV\n" "\t\t\t11 = AOpen VA2000MAX-STN6\n" "\t\t\t12 = YUAN MPG600GR/Kuroutoshikou CX23416GYC-STVLP\n" "\t\t\t13 = I/O Data GV-MVP/RX\n" "\t\t\t14 = I/O Data GV-MVP/RX2E\n" "\t\t\t15 = GOTVIEW PCI DVD\n" "\t\t\t16 = GOTVIEW PCI DVD2 Deluxe\n" "\t\t\t17 = Yuan MPC622\n" "\t\t\t18 = Digital Cowboy DCT-MTVP1\n" "\t\t\t19 = Yuan PG600V2/GotView PCI DVD Lite\n" "\t\t\t20 = Club3D ZAP-TV1x01\n" "\t\t\t21 = AverTV MCE 116 Plus\n" "\t\t\t22 = ASUS Falcon2\n" "\t\t\t23 = AverMedia PVR-150 Plus\n" "\t\t\t24 = AverMedia EZMaker PCI Deluxe\n" "\t\t\t25 = AverMedia M104 (not yet working)\n" "\t\t\t26 = Buffalo PC-MV5L/PCI\n" "\t\t\t27 = AVerMedia UltraTV 1500 MCE\n" "\t\t\t28 = Sony VAIO Giga Pocket (ENX Kikyou)\n" "\t\t\t 0 = Autodetect (default)\n" "\t\t\t-1 = Ignore this card\n\t\t"); MODULE_PARM_DESC(pal, "Set PAL standard: BGH, DK, I, M, N, Nc, 60"); MODULE_PARM_DESC(secam, "Set SECAM standard: BGH, DK, L, LC"); MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J (Japan), K (South Korea)"); MODULE_PARM_DESC(tunertype, "Specify tuner type:\n" "\t\t\t 0 = tuner for PAL-B/G/H/D/K/I, SECAM-B/G/H/D/K/L/Lc\n" "\t\t\t 1 = tuner for NTSC-M/J/K, PAL-M/N/Nc\n" "\t\t\t-1 = Autodetect (default)\n"); 
MODULE_PARM_DESC(debug, "Debug level (bitmask). Default: 0\n" "\t\t\t 1/0x0001: warning\n" "\t\t\t 2/0x0002: info\n" "\t\t\t 4/0x0004: mailbox\n" "\t\t\t 8/0x0008: ioctl\n" "\t\t\t 16/0x0010: file\n" "\t\t\t 32/0x0020: dma\n" "\t\t\t 64/0x0040: irq\n" "\t\t\t 128/0x0080: decoder\n" "\t\t\t 256/0x0100: yuv\n" "\t\t\t 512/0x0200: i2c\n" "\t\t\t1024/0x0400: high volume\n"); MODULE_PARM_DESC(ivtv_pci_latency, "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n" "\t\t\tDefault: Yes"); MODULE_PARM_DESC(ivtv_yuv_mode, "Specify the yuv playback mode:\n" "\t\t\t0 = interlaced\n\t\t\t1 = progressive\n\t\t\t2 = auto\n" "\t\t\tDefault: 0 (interlaced)"); MODULE_PARM_DESC(ivtv_yuv_threshold, "If ivtv_yuv_mode is 2 (auto) then playback content as\n\t\tprogressive if src height <= ivtv_yuvthreshold\n" "\t\t\tDefault: 480"); MODULE_PARM_DESC(enc_mpg_buffers, "Encoder MPG Buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_MPG_BUFFERS)); MODULE_PARM_DESC(enc_yuv_buffers, "Encoder YUV Buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_YUV_BUFFERS)); MODULE_PARM_DESC(enc_vbi_buffers, "Encoder VBI Buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_VBI_BUFFERS)); MODULE_PARM_DESC(enc_pcm_buffers, "Encoder PCM buffers (in kB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_PCM_BUFFERS)); MODULE_PARM_DESC(dec_mpg_buffers, "Decoder MPG buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_MPG_BUFFERS)); MODULE_PARM_DESC(dec_yuv_buffers, "Decoder YUV buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_YUV_BUFFERS)); MODULE_PARM_DESC(dec_vbi_buffers, "Decoder VBI buffers (in kB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_VBI_BUFFERS)); MODULE_PARM_DESC(newi2c, "Use new I2C implementation\n" "\t\t\t-1 is autodetect, 0 is off, 1 is on\n" "\t\t\tDefault is autodetect"); MODULE_PARM_DESC(i2c_clock_period, "Period of SCL for the I2C bus controlled by the CX23415/6\n" "\t\t\tMin: 10 usec (100 kHz), Max: 4500 usec 
(222 Hz)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_I2C_CLOCK_PERIOD)); MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first card"); MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); MODULE_DESCRIPTION("CX23415/CX23416 driver"); MODULE_SUPPORTED_DEVICE ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n" "\t\t\tYuan MPG series and similar)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IVTV_VERSION); void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask) { itv->irqmask &= ~mask; write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK); } void ivtv_set_irq_mask(struct ivtv *itv, u32 mask) { itv->irqmask |= mask; write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK); } int ivtv_set_output_mode(struct ivtv *itv, int mode) { int old_mode; spin_lock(&itv->lock); old_mode = itv->output_mode; if (old_mode == 0) itv->output_mode = old_mode = mode; spin_unlock(&itv->lock); return old_mode; } struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv) { switch (itv->output_mode) { case OUT_MPG: return &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; case OUT_YUV: return &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; default: return NULL; } } int ivtv_waitq(wait_queue_head_t *waitq) { DEFINE_WAIT(wait); prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE); schedule(); finish_wait(waitq, &wait); return signal_pending(current) ? -EINTR : 0; } /* Generic utility functions */ int ivtv_msleep_timeout(unsigned int msecs, int intr) { int timeout = msecs_to_jiffies(msecs); do { set_current_state(intr ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); timeout = schedule_timeout(timeout); if (intr) { int ret = signal_pending(current); if (ret) return ret; } } while (timeout); return 0; } /* Release ioremapped memory */ static void ivtv_iounmap(struct ivtv *itv) { if (itv == NULL) return; /* Release registers memory */ if (itv->reg_mem != NULL) { IVTV_DEBUG_INFO("releasing reg_mem\n"); iounmap(itv->reg_mem); itv->reg_mem = NULL; } /* Release io memory */ if (itv->has_cx23415 && itv->dec_mem != NULL) { IVTV_DEBUG_INFO("releasing dec_mem\n"); iounmap(itv->dec_mem); } itv->dec_mem = NULL; /* Release io memory */ if (itv->enc_mem != NULL) { IVTV_DEBUG_INFO("releasing enc_mem\n"); iounmap(itv->enc_mem); itv->enc_mem = NULL; } } /* Hauppauge card? get values from tveeprom */ void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv) { u8 eedata[256]; itv->i2c_client.addr = 0xA0 >> 1; tveeprom_read(&itv->i2c_client, eedata, sizeof(eedata)); tveeprom_hauppauge_analog(&itv->i2c_client, tv, eedata); } static void ivtv_process_eeprom(struct ivtv *itv) { struct tveeprom tv; int pci_slot = PCI_SLOT(itv->pdev->devfn); ivtv_read_eeprom(itv, &tv); /* Many thanks to Steven Toth from Hauppauge for providing the model numbers */ switch (tv.model) { /* In a few cases the PCI subsystem IDs do not correctly identify the card. A better method is to check the model number from the eeprom instead. */ case 30012 ... 30039: /* Low profile PVR250 */ case 32000 ... 32999: case 48000 ... 48099: /* 48??? range are PVR250s with a cx23415 */ case 48400 ... 48599: itv->card = ivtv_get_card(IVTV_CARD_PVR_250); break; case 48100 ... 48399: case 48600 ... 48999: itv->card = ivtv_get_card(IVTV_CARD_PVR_350); break; case 23000 ... 23999: /* PVR500 */ case 25000 ... 25999: /* Low profile PVR150 */ case 26000 ... 
26999: /* Regular PVR150 */ itv->card = ivtv_get_card(IVTV_CARD_PVR_150); break; case 0: IVTV_ERR("Invalid EEPROM\n"); return; default: IVTV_ERR("Unknown model %d, defaulting to PVR-150\n", tv.model); itv->card = ivtv_get_card(IVTV_CARD_PVR_150); break; } switch (tv.model) { /* Old style PVR350 (with an saa7114) uses this input for the tuner. */ case 48254: itv->card = ivtv_get_card(IVTV_CARD_PVR_350_V1); break; default: break; } itv->v4l2_cap = itv->card->v4l2_capabilities; itv->card_name = itv->card->name; itv->card_i2c = itv->card->i2c; /* If this is a PVR500 then it should be possible to detect whether it is the first or second unit by looking at the subsystem device ID: is bit 4 is set, then it is the second unit (according to info from Hauppauge). However, while this works for most cards, I have seen a few PVR500 cards where both units have the same subsystem ID. So instead I look at the reported 'PCI slot' (which is the slot on the PVR500 PCI bridge) and if it is 8, then it is assumed to be the first unit, otherwise it is the second unit. It is possible that it is a different slot when ivtv is used in Xen, in that case I ignore this card here. The worst that can happen is that the card presents itself with a non-working radio device. This detection is needed since the eeprom reports incorrectly that a radio is present on the second unit. */ if (tv.model / 1000 == 23) { static const struct ivtv_card_tuner_i2c ivtv_i2c_radio = { .radio = { 0x60, I2C_CLIENT_END }, .demod = { 0x43, I2C_CLIENT_END }, .tv = { 0x61, I2C_CLIENT_END }, }; itv->card_name = "WinTV PVR 500"; itv->card_i2c = &ivtv_i2c_radio; if (pci_slot == 8 || pci_slot == 9) { int is_first = (pci_slot & 1) == 0; itv->card_name = is_first ? 
"WinTV PVR 500 (unit #1)" : "WinTV PVR 500 (unit #2)"; if (!is_first) { IVTV_INFO("Correcting tveeprom data: no radio present on second unit\n"); tv.has_radio = 0; } } } IVTV_INFO("Autodetected %s\n", itv->card_name); switch (tv.tuner_hauppauge_model) { case 85: case 99: case 112: itv->pvr150_workaround = 1; break; default: break; } if (tv.tuner_type == TUNER_ABSENT) IVTV_ERR("tveeprom cannot autodetect tuner!\n"); if (itv->options.tuner == -1) itv->options.tuner = tv.tuner_type; if (itv->options.radio == -1) itv->options.radio = (tv.has_radio != 0); /* only enable newi2c if an IR blaster is present */ if (itv->options.newi2c == -1 && tv.has_ir) { itv->options.newi2c = (tv.has_ir & 4) ? 1 : 0; if (itv->options.newi2c) { IVTV_INFO("Reopen i2c bus for IR-blaster support\n"); exit_ivtv_i2c(itv); init_ivtv_i2c(itv); } } if (itv->std != 0) /* user specified tuner standard */ return; /* autodetect tuner standard */ if (tv.tuner_formats & V4L2_STD_PAL) { IVTV_DEBUG_INFO("PAL tuner detected\n"); itv->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H; } else if (tv.tuner_formats & V4L2_STD_NTSC) { IVTV_DEBUG_INFO("NTSC tuner detected\n"); itv->std |= V4L2_STD_NTSC_M; } else if (tv.tuner_formats & V4L2_STD_SECAM) { IVTV_DEBUG_INFO("SECAM tuner detected\n"); itv->std |= V4L2_STD_SECAM_L; } else { IVTV_INFO("No tuner detected, default to NTSC-M\n"); itv->std |= V4L2_STD_NTSC_M; } } static v4l2_std_id ivtv_parse_std(struct ivtv *itv) { switch (pal[0]) { case '6': tunertype = 0; return V4L2_STD_PAL_60; case 'b': case 'B': case 'g': case 'G': case 'h': case 'H': tunertype = 0; return V4L2_STD_PAL_BG | V4L2_STD_PAL_H; case 'n': case 'N': tunertype = 1; if (pal[1] == 'c' || pal[1] == 'C') return V4L2_STD_PAL_Nc; return V4L2_STD_PAL_N; case 'i': case 'I': tunertype = 0; return V4L2_STD_PAL_I; case 'd': case 'D': case 'k': case 'K': tunertype = 0; return V4L2_STD_PAL_DK; case 'M': case 'm': tunertype = 1; return V4L2_STD_PAL_M; case '-': break; default: IVTV_WARN("pal= argument not 
recognised\n"); return 0; } switch (secam[0]) { case 'b': case 'B': case 'g': case 'G': case 'h': case 'H': tunertype = 0; return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H; case 'd': case 'D': case 'k': case 'K': tunertype = 0; return V4L2_STD_SECAM_DK; case 'l': case 'L': tunertype = 0; if (secam[1] == 'C' || secam[1] == 'c') return V4L2_STD_SECAM_LC; return V4L2_STD_SECAM_L; case '-': break; default: IVTV_WARN("secam= argument not recognised\n"); return 0; } switch (ntsc[0]) { case 'm': case 'M': tunertype = 1; return V4L2_STD_NTSC_M; case 'j': case 'J': tunertype = 1; return V4L2_STD_NTSC_M_JP; case 'k': case 'K': tunertype = 1; return V4L2_STD_NTSC_M_KR; case '-': break; default: IVTV_WARN("ntsc= argument not recognised\n"); return 0; } /* no match found */ return 0; } static void ivtv_process_options(struct ivtv *itv) { const char *chipname; int i, j; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers * 1024; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers * 1024; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers * 1024; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers; itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers * 1024; itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers * 1024; itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers; itv->options.cardtype = cardtype[itv->instance]; itv->options.tuner = tuner[itv->instance]; itv->options.radio = radio[itv->instance]; itv->options.i2c_clock_period = i2c_clock_period[itv->instance]; if (itv->options.i2c_clock_period == -1) itv->options.i2c_clock_period = IVTV_DEFAULT_I2C_CLOCK_PERIOD; else if (itv->options.i2c_clock_period < 10) itv->options.i2c_clock_period = 10; else if (itv->options.i2c_clock_period > 4500) itv->options.i2c_clock_period = 4500; itv->options.newi2c = newi2c; if (tunertype < -1 || tunertype > 1) { IVTV_WARN("Invalid tunertype argument, will autodetect instead\n"); 
tunertype = -1; } itv->std = ivtv_parse_std(itv); if (itv->std == 0 && tunertype >= 0) itv->std = tunertype ? V4L2_STD_MN : (V4L2_STD_ALL & ~V4L2_STD_MN); itv->has_cx23415 = (itv->pdev->device == PCI_DEVICE_ID_IVTV15); chipname = itv->has_cx23415 ? "cx23415" : "cx23416"; if (itv->options.cardtype == -1) { IVTV_INFO("Ignore card (detected %s based chip)\n", chipname); return; } if ((itv->card = ivtv_get_card(itv->options.cardtype - 1))) { IVTV_INFO("User specified %s card (detected %s based chip)\n", itv->card->name, chipname); } else if (itv->options.cardtype != 0) { IVTV_ERR("Unknown user specified type, trying to autodetect card\n"); } if (itv->card == NULL) { if (itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE || itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT1 || itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT2) { itv->card = ivtv_get_card(itv->has_cx23415 ? IVTV_CARD_PVR_350 : IVTV_CARD_PVR_150); IVTV_INFO("Autodetected Hauppauge card (%s based)\n", chipname); } } if (itv->card == NULL) { for (i = 0; (itv->card = ivtv_get_card(i)); i++) { if (itv->card->pci_list == NULL) continue; for (j = 0; itv->card->pci_list[j].device; j++) { if (itv->pdev->device != itv->card->pci_list[j].device) continue; if (itv->pdev->subsystem_vendor != itv->card->pci_list[j].subsystem_vendor) continue; if (itv->pdev->subsystem_device != itv->card->pci_list[j].subsystem_device) continue; IVTV_INFO("Autodetected %s card (%s based)\n", itv->card->name, chipname); goto done; } } } done: if (itv->card == NULL) { itv->card = ivtv_get_card(IVTV_CARD_PVR_150); IVTV_ERR("Unknown card: vendor/device: [%04x:%04x]\n", itv->pdev->vendor, itv->pdev->device); IVTV_ERR(" subsystem vendor/device: [%04x:%04x]\n", itv->pdev->subsystem_vendor, itv->pdev->subsystem_device); IVTV_ERR(" %s based\n", chipname); IVTV_ERR("Defaulting to %s card\n", itv->card->name); IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n"); IVTV_ERR("card you have to 
the ivtv-devel mailinglist (www.ivtvdriver.org)\n"); IVTV_ERR("Prefix your subject line with [UNKNOWN IVTV CARD].\n"); } itv->v4l2_cap = itv->card->v4l2_capabilities; itv->card_name = itv->card->name; itv->card_i2c = itv->card->i2c; } /* Precondition: the ivtv structure has been memset to 0. Only the dev and num fields have been filled in. No assumptions on the card type may be made here (see ivtv_init_struct2 for that). */ static int __devinit ivtv_init_struct1(struct ivtv *itv) { itv->base_addr = pci_resource_start(itv->pdev, 0); itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ mutex_init(&itv->serialize_lock); mutex_init(&itv->i2c_bus_lock); mutex_init(&itv->udma.lock); spin_lock_init(&itv->lock); spin_lock_init(&itv->dma_reg_lock); itv->irq_work_queues = create_singlethread_workqueue(itv->v4l2_dev.name); if (itv->irq_work_queues == NULL) { IVTV_ERR("Could not create ivtv workqueue\n"); return -1; } INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler); /* start counting open_id at 1 */ itv->open_id = 1; /* Initial settings */ cx2341x_fill_defaults(&itv->params); itv->params.port = CX2341X_PORT_MEMORY; itv->params.capabilities = CX2341X_CAP_HAS_SLICED_VBI; init_waitqueue_head(&itv->eos_waitq); init_waitqueue_head(&itv->event_waitq); init_waitqueue_head(&itv->vsync_waitq); init_waitqueue_head(&itv->dma_waitq); init_timer(&itv->dma_timer); itv->dma_timer.function = ivtv_unfinished_dma; itv->dma_timer.data = (unsigned long)itv; itv->cur_dma_stream = -1; itv->cur_pio_stream = -1; itv->audio_stereo_mode = AUDIO_STEREO; itv->audio_bilingual_mode = AUDIO_MONO_LEFT; /* Ctrls */ itv->speed = 1000; /* VBI */ itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE; itv->vbi.sliced_in = &itv->vbi.in.fmt.sliced; /* Init the sg table for osd/yuv output */ sg_init_table(itv->udma.SGlist, IVTV_DMA_SG_OSD_ENT); /* OSD */ itv->osd_global_alpha_state = 1; itv->osd_global_alpha = 255; /* YUV */ 
atomic_set(&itv->yuv_info.next_dma_frame, -1); itv->yuv_info.lace_mode = ivtv_yuv_mode; itv->yuv_info.lace_threshold = ivtv_yuv_threshold; itv->yuv_info.max_frames_buffered = 3; itv->yuv_info.track_osd = 1; return 0; } /* Second initialization part. Here the card type has been autodetected. */ static void __devinit ivtv_init_struct2(struct ivtv *itv) { int i; for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) if (itv->card->video_inputs[i].video_type == 0) break; itv->nof_inputs = i; for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) if (itv->card->audio_inputs[i].audio_type == 0) break; itv->nof_audio_inputs = i; if (itv->card->hw_all & IVTV_HW_CX25840) { itv->vbi.sliced_size = 288; /* multiple of 16, real size = 284 */ } else { itv->vbi.sliced_size = 64; /* multiple of 16, real size = 52 */ } /* Find tuner input */ for (i = 0; i < itv->nof_inputs; i++) { if (itv->card->video_inputs[i].video_type == IVTV_CARD_INPUT_VID_TUNER) break; } if (i == itv->nof_inputs) i = 0; itv->active_input = i; itv->audio_input = itv->card->video_inputs[i].audio_index; } static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev, const struct pci_device_id *pci_id) { u16 cmd; u8 card_rev; unsigned char pci_latency; IVTV_DEBUG_INFO("Enabling pci device\n"); if (pci_enable_device(pdev)) { IVTV_ERR("Can't enable device!\n"); return -EIO; } if (pci_set_dma_mask(pdev, 0xffffffff)) { IVTV_ERR("No suitable DMA available.\n"); return -EIO; } if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) { IVTV_ERR("Cannot request encoder memory region.\n"); return -EIO; } if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE, "ivtv registers")) { IVTV_ERR("Cannot request register memory region.\n"); release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); return -EIO; } if (itv->has_cx23415 && !request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE, "ivtv decoder")) { IVTV_ERR("Cannot request decoder memory region.\n"); 
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); return -EIO; } /* Check for bus mastering */ pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_MASTER)) { IVTV_DEBUG_INFO("Attempting to enable Bus Mastering\n"); pci_set_master(pdev); pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_MASTER)) { IVTV_ERR("Bus Mastering is not enabled\n"); return -ENXIO; } } IVTV_DEBUG_INFO("Bus Mastering Enabled.\n"); pci_read_config_byte(pdev, PCI_CLASS_REVISION, &card_rev); pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency < 64 && ivtv_pci_latency) { IVTV_INFO("Unreasonably low latency timer, " "setting to 64 (was %d)\n", pci_latency); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); } /* This config space value relates to DMA latencies. The default value 0x8080 is too low however and will lead to DMA errors. 0xffff is the max value which solves these problems. 
*/ pci_write_config_dword(pdev, 0x40, 0xffff); IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, " "irq: %d, latency: %d, memory: 0x%lx\n", pdev->device, card_rev, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->irq, pci_latency, (unsigned long)itv->base_addr); return 0; } static void ivtv_load_and_init_modules(struct ivtv *itv) { u32 hw = itv->card->hw_all; unsigned i; /* check which i2c devices are actually found */ for (i = 0; i < 32; i++) { u32 device = 1 << i; if (!(device & hw)) continue; if (device == IVTV_HW_GPIO || device == IVTV_HW_TVEEPROM) { /* GPIO and TVEEPROM do not use i2c probing */ itv->hw_flags |= device; continue; } if (ivtv_i2c_register(itv, i) == 0) itv->hw_flags |= device; } /* probe for legacy IR controllers that aren't in card definitions */ if ((itv->hw_flags & IVTV_HW_IR_ANY) == 0) ivtv_i2c_new_ir_legacy(itv); if (itv->card->hw_all & IVTV_HW_CX25840) itv->sd_video = ivtv_find_hw(itv, IVTV_HW_CX25840); else if (itv->card->hw_all & IVTV_HW_SAA717X) itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA717X); else if (itv->card->hw_all & IVTV_HW_SAA7114) itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7114); else itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7115); itv->sd_audio = ivtv_find_hw(itv, itv->card->hw_audio_ctrl); itv->sd_muxer = ivtv_find_hw(itv, itv->card->hw_muxer); hw = itv->hw_flags; if (itv->card->type == IVTV_CARD_CX23416GYC) { /* Several variations of this card exist, detect which card type should be used. 
*/ if ((hw & (IVTV_HW_UPD64031A | IVTV_HW_UPD6408X)) == 0) itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGRYCS); else if ((hw & IVTV_HW_UPD64031A) == 0) itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGR); } else if (itv->card->type == IVTV_CARD_GV_MVPRX || itv->card->type == IVTV_CARD_GV_MVPRX2E) { /* The crystal frequency of GVMVPRX is 24.576MHz */ v4l2_subdev_call(itv->sd_video, video, s_crystal_freq, SAA7115_FREQ_24_576_MHZ, SAA7115_FREQ_FL_UCGC); } if (hw & IVTV_HW_CX25840) { itv->vbi.raw_decoder_line_size = 1444; itv->vbi.raw_decoder_sav_odd_field = 0x20; itv->vbi.raw_decoder_sav_even_field = 0x60; itv->vbi.sliced_decoder_line_size = 272; itv->vbi.sliced_decoder_sav_odd_field = 0xB0; itv->vbi.sliced_decoder_sav_even_field = 0xF0; } if (hw & IVTV_HW_SAA711X) { struct v4l2_dbg_chip_ident v; /* determine the exact saa711x model */ itv->hw_flags &= ~IVTV_HW_SAA711X; v.match.type = V4L2_CHIP_MATCH_I2C_DRIVER; strlcpy(v.match.name, "saa7115", sizeof(v.match.name)); ivtv_call_hw(itv, IVTV_HW_SAA711X, core, g_chip_ident, &v); if (v.ident == V4L2_IDENT_SAA7114) { itv->hw_flags |= IVTV_HW_SAA7114; /* VBI is not yet supported by the saa7114 driver. 
*/ itv->v4l2_cap &= ~(V4L2_CAP_SLICED_VBI_CAPTURE|V4L2_CAP_VBI_CAPTURE); } else { itv->hw_flags |= IVTV_HW_SAA7115; } itv->vbi.raw_decoder_line_size = 1443; itv->vbi.raw_decoder_sav_odd_field = 0x25; itv->vbi.raw_decoder_sav_even_field = 0x62; itv->vbi.sliced_decoder_line_size = 51; itv->vbi.sliced_decoder_sav_odd_field = 0xAB; itv->vbi.sliced_decoder_sav_even_field = 0xEC; } if (hw & IVTV_HW_SAA717X) { itv->vbi.raw_decoder_line_size = 1443; itv->vbi.raw_decoder_sav_odd_field = 0x25; itv->vbi.raw_decoder_sav_even_field = 0x62; itv->vbi.sliced_decoder_line_size = 51; itv->vbi.sliced_decoder_sav_odd_field = 0xAB; itv->vbi.sliced_decoder_sav_even_field = 0xEC; } } static int __devinit ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { int retval = 0; int vbi_buf_size; struct ivtv *itv; itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC); if (itv == NULL) return -ENOMEM; itv->pdev = pdev; itv->instance = v4l2_device_set_name(&itv->v4l2_dev, "ivtv", &ivtv_instance); retval = v4l2_device_register(&pdev->dev, &itv->v4l2_dev); if (retval) { kfree(itv); return retval; } IVTV_INFO("Initializing card %d\n", itv->instance); ivtv_process_options(itv); if (itv->options.cardtype == -1) { retval = -ENODEV; goto err; } if (ivtv_init_struct1(itv)) { retval = -ENOMEM; goto err; } IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr); /* PCI Device Setup */ retval = ivtv_setup_pci(itv, pdev, pci_id); if (retval == -EIO) goto free_workqueue; if (retval == -ENXIO) goto free_mem; /* map io memory */ IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); if (!itv->enc_mem) { IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n"); retval = -ENOMEM; goto free_mem; } if (itv->has_cx23415) { IVTV_DEBUG_INFO("attempting ioremap at 
0x%08x len 0x%08x\n", itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); if (!itv->dec_mem) { IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n"); retval = -ENOMEM; goto free_mem; } } else { itv->dec_mem = itv->enc_mem; } /* map registers memory */ IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); itv->reg_mem = ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (!itv->reg_mem) { IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n"); retval = -ENOMEM; goto free_io; } retval = ivtv_gpio_init(itv); if (retval) goto free_io; /* active i2c */ IVTV_DEBUG_INFO("activating i2c...\n"); if (init_ivtv_i2c(itv)) { IVTV_ERR("Could not initialize i2c\n"); goto free_io; } if (itv->card->hw_all & IVTV_HW_TVEEPROM) { /* Based on the model number the cardtype may be changed. The PCI IDs are not always reliable. 
*/ ivtv_process_eeprom(itv); } if (itv->card->comment) IVTV_INFO("%s", itv->card->comment); if (itv->card->v4l2_capabilities == 0) { /* card was detected but is not supported */ retval = -ENODEV; goto free_i2c; } if (itv->std == 0) { itv->std = V4L2_STD_NTSC_M; } if (itv->options.tuner == -1) { int i; for (i = 0; i < IVTV_CARD_MAX_TUNERS; i++) { if ((itv->std & itv->card->tuners[i].std) == 0) continue; itv->options.tuner = itv->card->tuners[i].tuner; break; } } /* if no tuner was found, then pick the first tuner in the card list */ if (itv->options.tuner == -1 && itv->card->tuners[0].std) { itv->std = itv->card->tuners[0].std; if (itv->std & V4L2_STD_PAL) itv->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H; else if (itv->std & V4L2_STD_NTSC) itv->std = V4L2_STD_NTSC_M; else if (itv->std & V4L2_STD_SECAM) itv->std = V4L2_STD_SECAM_L; itv->options.tuner = itv->card->tuners[0].tuner; } if (itv->options.radio == -1) itv->options.radio = (itv->card->radio_input.audio_type != 0); /* The card is now fully identified, continue with card-specific initialization. */ ivtv_init_struct2(itv); ivtv_load_and_init_modules(itv); if (itv->std & V4L2_STD_525_60) { itv->is_60hz = 1; itv->is_out_60hz = 1; } else { itv->is_50hz = 1; itv->is_out_50hz = 1; } itv->yuv_info.osd_full_w = 720; itv->yuv_info.osd_full_h = itv->is_out_50hz ? 576 : 480; itv->yuv_info.v4l2_src_w = itv->yuv_info.osd_full_w; itv->yuv_info.v4l2_src_h = itv->yuv_info.osd_full_h; itv->params.video_gop_size = itv->is_60hz ? 15 : 12; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_MPG] = 0x08000; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_PCM] = 0x01200; itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_MPG] = 0x10000; itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_YUV] = 0x10000; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_YUV] = 0x08000; /* Setup VBI Raw Size. Should be big enough to hold PAL. It is possible to switch between PAL and NTSC, so we need to take the largest size here. 
*/ /* 1456 is multiple of 16, real size = 1444 */ itv->vbi.raw_size = 1456; /* We use a buffer size of 1/2 of the total size needed for a frame. This is actually very useful, since we now receive a field at a time and that makes 'compressing' the raw data down to size by stripping off the SAV codes a lot easier. Note: having two different buffer sizes prevents standard switching on the fly. We need to find a better solution... */ vbi_buf_size = itv->vbi.raw_size * (itv->is_60hz ? 24 : 36) / 2; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_VBI] = vbi_buf_size; itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_VBI] = sizeof(struct v4l2_sliced_vbi_data) * 36; if (itv->options.radio > 0) itv->v4l2_cap |= V4L2_CAP_RADIO; if (itv->options.tuner > -1) { struct tuner_setup setup; setup.addr = ADDR_UNSET; setup.type = itv->options.tuner; setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ setup.tuner_callback = (setup.type == TUNER_XC2028) ? ivtv_reset_tuner_gpio : NULL; ivtv_call_all(itv, tuner, s_type_addr, &setup); if (setup.type == TUNER_XC2028) { static struct xc2028_ctrl ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, }; struct v4l2_priv_tun_config cfg = { .tuner = itv->options.tuner, .priv = &ctrl, }; ivtv_call_all(itv, tuner, s_config, &cfg); } } /* The tuner is fixed to the standard. The other inputs (e.g. S-Video) are not. */ itv->tuner_std = itv->std; if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { ivtv_call_all(itv, video, s_std_output, itv->std); /* Turn off the output signal. The mpeg decoder is not yet active so without this you would get a green image until the mpeg decoder becomes active. 
*/ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0); } /* clear interrupt mask, effectively disabling interrupts */ ivtv_set_irq_mask(itv, 0xffffffff); /* Register IRQ */ retval = request_irq(itv->pdev->irq, ivtv_irq_handler, IRQF_SHARED | IRQF_DISABLED, itv->v4l2_dev.name, (void *)itv); if (retval) { IVTV_ERR("Failed to register irq %d\n", retval); goto free_i2c; } retval = ivtv_streams_setup(itv); if (retval) { IVTV_ERR("Error %d setting up streams\n", retval); goto free_irq; } retval = ivtv_streams_register(itv); if (retval) { IVTV_ERR("Error %d registering devices\n", retval); goto free_streams; } IVTV_INFO("Initialized card: %s\n", itv->card_name); return 0; free_streams: ivtv_streams_cleanup(itv, 1); free_irq: free_irq(itv->pdev->irq, (void *)itv); free_i2c: exit_ivtv_i2c(itv); free_io: ivtv_iounmap(itv); free_mem: release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (itv->has_cx23415) release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); free_workqueue: destroy_workqueue(itv->irq_work_queues); err: if (retval == 0) retval = -ENODEV; IVTV_ERR("Error %d on initialization\n", retval); v4l2_device_unregister(&itv->v4l2_dev); kfree(itv); return retval; } int ivtv_init_on_first_open(struct ivtv *itv) { struct v4l2_frequency vf; /* Needed to call ioctls later */ struct ivtv_open_id fh; int fw_retry_count = 3; int video_input; fh.itv = itv; if (test_bit(IVTV_F_I_FAILED, &itv->i_flags)) return -ENXIO; if (test_and_set_bit(IVTV_F_I_INITED, &itv->i_flags)) return 0; while (--fw_retry_count > 0) { /* load firmware */ if (ivtv_firmware_init(itv) == 0) break; if (fw_retry_count > 1) IVTV_WARN("Retry loading firmware\n"); } if (fw_retry_count == 0) { set_bit(IVTV_F_I_FAILED, &itv->i_flags); return -ENXIO; } /* Try and get firmware versions */ IVTV_DEBUG_INFO("Getting firmware version..\n"); ivtv_firmware_versions(itv); if (itv->card->hw_all & IVTV_HW_CX25840) { struct 
v4l2_control ctrl; v4l2_subdev_call(itv->sd_video, core, load_fw); /* CX25840_CID_ENABLE_PVR150_WORKAROUND */ ctrl.id = V4L2_CID_PRIVATE_BASE; ctrl.value = itv->pvr150_workaround; v4l2_subdev_call(itv->sd_video, core, s_ctrl, &ctrl); } vf.tuner = 0; vf.type = V4L2_TUNER_ANALOG_TV; vf.frequency = 6400; /* the tuner 'baseline' frequency */ /* Set initial frequency. For PAL/SECAM broadcasts no 'default' channel exists AFAIK. */ if (itv->std == V4L2_STD_NTSC_M_JP) { vf.frequency = 1460; /* ch. 1 91250*16/1000 */ } else if (itv->std & V4L2_STD_NTSC_M) { vf.frequency = 1076; /* ch. 4 67250*16/1000 */ } video_input = itv->active_input; itv->active_input++; /* Force update of input */ ivtv_s_input(NULL, &fh, video_input); /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code in one place. */ itv->std++; /* Force full standard initialization */ itv->std_out = itv->std; ivtv_s_frequency(NULL, &fh, &vf); if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes the mpeg decoder so now the saa7127 receives a proper signal. */ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1); ivtv_init_mpeg_decoder(itv); } /* On a cx23416 this seems to be able to enable DMA to the chip? */ if (!itv->has_cx23415) write_reg_sync(0x03, IVTV_REG_DMACONTROL); /* Default interrupts enabled. For the PVR350 this includes the decoder VSYNC interrupt, which is always on. It is not only used during decoding but also by the OSD. Some old PVR250 cards had a cx23415, so testing for that is too general. Instead test if the card has video output capability. 
*/ if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC); ivtv_set_osd_alpha(itv); } else ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT); /* For cards with video out, this call needs interrupts enabled */ ivtv_s_std(NULL, &fh, &itv->tuner_std); return 0; } static void ivtv_remove(struct pci_dev *pdev) { struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev); struct ivtv *itv = to_ivtv(v4l2_dev); int i; IVTV_DEBUG_INFO("Removing card\n"); if (test_bit(IVTV_F_I_INITED, &itv->i_flags)) { /* Stop all captures */ IVTV_DEBUG_INFO("Stopping all streams\n"); if (atomic_read(&itv->capturing) > 0) ivtv_stop_all_captures(itv); /* Stop all decoding */ IVTV_DEBUG_INFO("Stopping decoding\n"); /* Turn off the TV-out */ if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0); if (atomic_read(&itv->decoding) > 0) { int type; if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) type = IVTV_DEC_STREAM_TYPE_YUV; else type = IVTV_DEC_STREAM_TYPE_MPG; ivtv_stop_v4l2_decode_stream(&itv->streams[type], VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0); } ivtv_halt_firmware(itv); } /* Interrupts */ ivtv_set_irq_mask(itv, 0xffffffff); del_timer_sync(&itv->dma_timer); /* Stop all Work Queues */ flush_workqueue(itv->irq_work_queues); destroy_workqueue(itv->irq_work_queues); ivtv_streams_cleanup(itv, 1); ivtv_udma_free(itv); exit_ivtv_i2c(itv); free_irq(itv->pdev->irq, (void *)itv); ivtv_iounmap(itv); release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (itv->has_cx23415) release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); pci_disable_device(itv->pdev); for (i = 0; i < IVTV_VBI_FRAMES; i++) kfree(itv->vbi.sliced_mpeg_data[i]); printk(KERN_INFO "ivtv: Removed %s\n", itv->card_name); v4l2_device_unregister(&itv->v4l2_dev); kfree(itv); } /* define a pci_driver for card detection */ static struct 
pci_driver ivtv_pci_driver = { .name = "ivtv", .id_table = ivtv_pci_tbl, .probe = ivtv_probe, .remove = ivtv_remove, }; static int __init module_start(void) { printk(KERN_INFO "ivtv: Start initialization, version %s\n", IVTV_VERSION); /* Validate parameters */ if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) { printk(KERN_ERR "ivtv: Exiting, ivtv_first_minor must be between 0 and %d\n", IVTV_MAX_CARDS - 1); return -1; } if (ivtv_debug < 0 || ivtv_debug > 2047) { ivtv_debug = 0; printk(KERN_INFO "ivtv: Debug value must be >= 0 and <= 2047\n"); } if (pci_register_driver(&ivtv_pci_driver)) { printk(KERN_ERR "ivtv: Error detecting PCI card\n"); return -ENODEV; } printk(KERN_INFO "ivtv: End initialization\n"); return 0; } static void __exit module_cleanup(void) { pci_unregister_driver(&ivtv_pci_driver); } /* Note: These symbols are exported because they are used by the ivtvfb framebuffer module and an infrared module for the IR-blaster. */ EXPORT_SYMBOL(ivtv_set_irq_mask); EXPORT_SYMBOL(ivtv_api); EXPORT_SYMBOL(ivtv_vapi); EXPORT_SYMBOL(ivtv_vapi_result); EXPORT_SYMBOL(ivtv_clear_irq_mask); EXPORT_SYMBOL(ivtv_debug); EXPORT_SYMBOL(ivtv_reset_ir_gpio); EXPORT_SYMBOL(ivtv_udma_setup); EXPORT_SYMBOL(ivtv_udma_unmap); EXPORT_SYMBOL(ivtv_udma_alloc); EXPORT_SYMBOL(ivtv_udma_prepare); EXPORT_SYMBOL(ivtv_init_on_first_open); module_init(module_start); module_exit(module_cleanup);
gpl-2.0
Renesas-EMEV2/Kernel
drivers/net/sunbmac.c
760
34051
/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
 *
 * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/auxio.h>
#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/idprom.h>
#include <asm/io.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#include "sunbmac.h"

#define DRV_NAME	"sunbmac"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"August 26, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
MODULE_LICENSE("GPL");

/* Compile-time debug switches; each expands the matching D*() macro
 * to a printk when defined.
 */
#undef DEBUG_PROBE
#undef DEBUG_TX
#undef DEBUG_IRQ

#ifdef DEBUG_PROBE
#define DP(x)  printk x
#else
#define DP(x)
#endif

#ifdef DEBUG_TX
#define DTX(x)  printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)  printk x
#else
#define DIRQ(x)
#endif

#define DEFAULT_JAMSIZE    4 /* Toe jam */

#define QEC_RESET_TRIES 200

/* Issue a global reset to the QEC and poll (up to QEC_RESET_TRIES *
 * 20us) until the reset bit self-clears.  Returns 0 on success, -1 if
 * the QEC never comes out of reset.
 */
static int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
	return -1;
}

/* Program the QEC for BigMAC mode: burst size, packet size, and split
 * the local buffer memory evenly between TX and RX.
 */
static void qec_init(struct bigmac *bp)
{
	struct of_device *qec_op = bp->qec_op;
	void __iomem *gregs = bp->gregs;
	u8 bsizes = bp->bigmac_bursts;
	u32 regval;

	/* 64byte bursts do not work at the moment, do
	 * not even try to enable them.  -DaveM
	 */
	if (bsizes & DMA_BURST32)
		regval = GLOB_CTRL_B32;
	else
		regval = GLOB_CTRL_B16;
	sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
	sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);

	/* All of memsize is given to bigmac. */
	sbus_writel(resource_size(&qec_op->resource[1]),
		    gregs + GLOB_MSIZE);

	/* Half to the transmitter, half to the receiver. */
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_TSIZE);
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_RSIZE);
}

#define TX_RESET_TRIES     32
#define RX_RESET_TRIES     32

/* Reset the BigMAC transmitter by clearing TXCFG and polling until it
 * reads back zero (ignoring the sticky FIFO threshold bit).
 */
static void bigmac_tx_reset(void __iomem *bregs)
{
	int tries = TX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_TXCFG);

	/* The fifo threshold bit is read-only and does
	 * not clear.  -DaveM
	 */
	while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
	       --tries != 0)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
		printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_TXCFG));
	}
}

/* Reset the BigMAC receiver by clearing RXCFG and polling until it
 * reads back zero.
 */
static void bigmac_rx_reset(void __iomem *bregs)
{
	int tries = RX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_RXCFG);
	while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
		printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_RXCFG));
	}
}

/* Reset the transmitter and receiver. */
static void bigmac_stop(struct bigmac *bp)
{
	bigmac_tx_reset(bp->bregs);
	bigmac_rx_reset(bp->bregs);
}

/* Fold the hardware error counters into the software net_device_stats
 * and clear them (they are clear-on-write registers).
 */
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
	struct net_device_stats *stats = &bp->enet_stats;

	stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
	sbus_writel(0, bregs + BMAC_RCRCECTR);

	stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
	sbus_writel(0, bregs + BMAC_UNALECTR);

	stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
	sbus_writel(0, bregs + BMAC_GLECTR);

	stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);

	stats->collisions +=
		(sbus_readl(bregs + BMAC_EXCTR) +
		 sbus_readl(bregs + BMAC_LTCTR));
	sbus_writel(0, bregs + BMAC_EXCTR);
	sbus_writel(0, bregs + BMAC_LTCTR);
}

/* Free every skb still attached to the RX and TX rings. */
static void bigmac_clean_rings(struct bigmac *bp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (bp->rx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->rx_skbs[i]);
			bp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (bp->tx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->tx_skbs[i]);
			bp->tx_skbs[i] = NULL;
		}
	}
}

/* Reset ring indices, drop any leftover buffers, and repopulate the
 * RX ring with freshly allocated, DMA-mapped skbs.  Uses GFP_ATOMIC
 * when called from IRQ/atomic context.
 */
static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
	struct bmac_init_block *bb = bp->bmac_block;
	struct net_device *dev = bp->dev;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	bigmac_clean_rings(bp);

	/* Now get new skbufs for the receive ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
		if (!skb)
			continue;

		bp->rx_skbs[i] = skb;
		skb->dev = dev;

		/* Because we reserve afterwards. */
		skb_put(skb, ETH_FRAME_LEN);
		skb_reserve(skb, 34);

		bb->be_rxd[i].rx_addr =
			dma_map_single(&bp->bigmac_op->dev, skb->data,
				       RX_BUF_ALLOC_SIZE - 34,
				       DMA_FROM_DEVICE);
		bb->be_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
}

/* MDIO management-PAL clock patterns used to bit-bang the PHY. */
#define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)

/* Clock the MDIO bus idle (20 cycles) to resynchronize the PHY. */
static void idle_transceiver(void __iomem *tregs)
{
	int i = 20;

	while (i--) {
		sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	}
}

/* Bit-bang a single MDIO bit out to the internal or external
 * transceiver, with one clock pulse.
 */
static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
{
	if (bp->tcvr_type == internal) {
		bit = (bit & 1) << 3;
		sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		bit = (bit & 1) << 2;
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
	}
}

/* Bit-bang a single MDIO bit in from the transceiver (sample after
 * the clock edge).
 */
static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
	} else {
		printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
	}
	return retval;
}

/* Like read_tcvr_bit(), but samples the data line before toggling the
 * clock (opposite sampling phase).
 */
static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
	}
	return retval;
}

/* Shift out the low 5 bits of 'byte', MSB first, over MDIO. */
static void put_tcvr_byte(struct bigmac *bp,
			  void __iomem *tregs,
			  unsigned int byte)
{
	int shift = 4;

	do {
		write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
		shift -= 1;
	} while (shift >= 0);
}

/* Bit-bang an MDIO register write: preamble/start/opcode, PHY address,
 * register number, turnaround, then the 16-bit value MSB first.
 */
static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
			      int reg, unsigned short val)
{
	int shift;

	reg &= 0xff;
	val &= 0xffff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	shift = 15;
	do {
		write_tcvr_bit(bp, tregs, (val >> shift) & 1);
		shift -= 1;
	} while (shift >= 0);
}

/* Bit-bang an MDIO register read; returns the 16-bit register value,
 * or 0xffff if the transceiver type is unknown.
 */
static unsigned short bigmac_tcvr_read(struct bigmac *bp,
				       void __iomem *tregs,
				       int reg)
{
	unsigned short retval = 0;

	reg &= 0xff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return 0xffff;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	if (bp->tcvr_type == external) {
		int shift = 15;

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit2(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
	} else {
		int shift = 15;

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
	}
	return retval;
}

/* Probe for and select the attached transceiver (internal vs external)
 * and program the transceiver PAL accordingly.
 */
static void bigmac_tcvr_init(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	u32 mpal;

	idle_transceiver(tregs);
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
		    tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);

	/* Only the bit for the present transceiver (internal or
	 * external) will stick, set them both and see what stays.
	 */
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);
	udelay(20);

	mpal = sbus_readl(tregs + TCVR_MPAL);
	if (mpal & MGMT_PAL_EXT_MDIO) {
		bp->tcvr_type = external;
		sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else if (mpal & MGMT_PAL_INT_MDIO) {
		bp->tcvr_type = internal;
		sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
			      TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else {
		printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
		       "external MDIO available!\n");
		printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
		       sbus_readl(tregs + TCVR_MPAL),
		       sbus_readl(tregs + TCVR_TPAL));
	}
}

static int bigmac_init_hw(struct bigmac *, int);

/* Link auto-sensing helper: if we were trying 100baseT, reset the PHY
 * and fall back to 10baseT (returns 0); once both speeds have been
 * tried, return -1 so the caller can report a dead link.
 */
static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
	if (bp->sw_bmcr & BMCR_SPEED100) {
		int timeout;

		/* Reset the PHY. */
		bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
		bp->sw_bmcr = (BMCR_RESET);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

		timeout = 64;
		while (--timeout) {
			bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
			if ((bp->sw_bmcr & BMCR_RESET) == 0)
				break;
			udelay(20);
		}
		if (timeout == 0)
			printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

		/* Now we try 10baseT. */
		bp->sw_bmcr &= ~(BMCR_SPEED100);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
		return 0;
	}

	/* We've tried them all.
*/ return -1; } static void bigmac_timer(unsigned long data) { struct bigmac *bp = (struct bigmac *) data; void __iomem *tregs = bp->tregs; int restart_timer = 0; bp->timer_ticks++; if (bp->timer_state == ltrywait) { bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); if (bp->sw_bmsr & BMSR_LSTATUS) { printk(KERN_INFO "%s: Link is now up at %s.\n", bp->dev->name, (bp->sw_bmcr & BMCR_SPEED100) ? "100baseT" : "10baseT"); bp->timer_state = asleep; restart_timer = 0; } else { if (bp->timer_ticks >= 4) { int ret; ret = try_next_permutation(bp, tregs); if (ret == -1) { printk(KERN_ERR "%s: Link down, cable problem?\n", bp->dev->name); ret = bigmac_init_hw(bp, 0); if (ret) { printk(KERN_ERR "%s: Error, cannot re-init the " "BigMAC.\n", bp->dev->name); } return; } bp->timer_ticks = 0; restart_timer = 1; } else { restart_timer = 1; } } } else { /* Can't happens.... */ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", bp->dev->name); restart_timer = 0; bp->timer_ticks = 0; bp->timer_state = asleep; /* foo on you */ } if (restart_timer != 0) { bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ add_timer(&bp->bigmac_timer); } } /* Well, really we just force the chip into 100baseT then * 10baseT, each time checking for a link status. */ static void bigmac_begin_auto_negotiation(struct bigmac *bp) { void __iomem *tregs = bp->tregs; int timeout; /* Grab new software copies of PHY registers. */ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); /* Reset the PHY. 
 */
	bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
	bp->sw_bmcr = (BMCR_RESET);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	/* Wait (up to 64 * 20us) for the PHY reset bit to self-clear. */
	timeout = 64;
	while (--timeout) {
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
		if ((bp->sw_bmcr & BMCR_RESET) == 0)
			break;
		udelay(20);
	}
	if (timeout == 0)
		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

	/* First we try 100baseT. */
	bp->sw_bmcr |= BMCR_SPEED100;
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	/* Arm the link-watch timer; bigmac_timer() takes it from here. */
	bp->timer_state = ltrywait;
	bp->timer_ticks = 0;
	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
	bp->bigmac_timer.data = (unsigned long) bp;
	bp->bigmac_timer.function = &bigmac_timer;
	add_timer(&bp->bigmac_timer);
}

/* Full chip (re)initialization: latch counters, reset and program the
 * QEC, rebuild the descriptor rings, init the PHY, program MAC address
 * and filters, point the QEC at the rings, unmask interrupts, enable
 * TX/RX, and kick off link speed detection.  Returns 0.
 */
static int bigmac_init_hw(struct bigmac *bp, int from_irq)
{
	void __iomem *gregs        = bp->gregs;
	void __iomem *cregs        = bp->creg;
	void __iomem *bregs        = bp->bregs;
	unsigned char *e = &bp->dev->dev_addr[0];

	/* Latch current counters into statistics. */
	bigmac_get_counters(bp, bregs);

	/* Reset QEC. */
	qec_global_reset(gregs);

	/* Init QEC. */
	qec_init(bp);

	/* Alloc and reset the tx/rx descriptor chains. */
	bigmac_init_rings(bp, from_irq);

	/* Initialize the PHY. */
	bigmac_tcvr_init(bp);

	/* Stop transmitter and receiver. */
	bigmac_stop(bp);

	/* Set hardware ethernet address. */
	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);

	/* Clear the hash table until mc upload occurs. */
	sbus_writel(0, bregs + BMAC_HTABLE3);
	sbus_writel(0, bregs + BMAC_HTABLE2);
	sbus_writel(0, bregs + BMAC_HTABLE1);
	sbus_writel(0, bregs + BMAC_HTABLE0);

	/* Enable Big Mac hash table filter. */
	sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
		    bregs + BMAC_RXCFG);
	udelay(20);

	/* Ok, configure the Big Mac transmitter. */
	sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);

	/* The HME docs recommend to use the 10LSB of our MAC here. */
	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
		    bregs + BMAC_RSEED);

	/* Enable the output drivers no matter what. */
	sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
		    bregs + BMAC_XIFCFG);

	/* Tell the QEC where the ring descriptors are. */
	sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
		    cregs + CREG_RXDS);
	sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
		    cregs + CREG_TXDS);

	/* Setup the FIFO pointers into QEC local memory. */
	sbus_writel(0, cregs + CREG_RXRBUFPTR);
	sbus_writel(0, cregs + CREG_RXWBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXRBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXWBUFPTR);

	/* Tell bigmac what interrupts we don't want to hear about. */
	sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
		    bregs + BMAC_IMASK);

	/* Enable the various other irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(0, cregs + CREG_TIMASK);
	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(0, cregs + CREG_BMASK);

	/* Set jam size to a reasonable default. */
	sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);

	/* Clear collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* Enable transmitter and receiver. */
	sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
		    bregs + BMAC_TXCFG);
	sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
		    bregs + BMAC_RXCFG);

	/* Ok, start detecting link speed/duplex. */
	bigmac_begin_auto_negotiation(bp);

	/* Success. */
	return 0;
}

/* Error interrupts get sent here.
*/ static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) { printk(KERN_ERR "bigmac_is_medium_rare: "); if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) { if (qec_status & GLOB_STAT_ER) printk("QEC_ERROR, "); if (qec_status & GLOB_STAT_BM) printk("QEC_BMAC_ERROR, "); } if (bmac_status & CREG_STAT_ERRORS) { if (bmac_status & CREG_STAT_BERROR) printk("BMAC_ERROR, "); if (bmac_status & CREG_STAT_TXDERROR) printk("TXD_ERROR, "); if (bmac_status & CREG_STAT_TXLERR) printk("TX_LATE_ERROR, "); if (bmac_status & CREG_STAT_TXPERR) printk("TX_PARITY_ERROR, "); if (bmac_status & CREG_STAT_TXSERR) printk("TX_SBUS_ERROR, "); if (bmac_status & CREG_STAT_RXDROP) printk("RX_DROP_ERROR, "); if (bmac_status & CREG_STAT_RXSMALL) printk("RX_SMALL_ERROR, "); if (bmac_status & CREG_STAT_RXLERR) printk("RX_LATE_ERROR, "); if (bmac_status & CREG_STAT_RXPERR) printk("RX_PARITY_ERROR, "); if (bmac_status & CREG_STAT_RXSERR) printk("RX_SBUS_ERROR, "); } printk(" RESET\n"); bigmac_init_hw(bp, 1); } /* BigMAC transmit complete service routines. */ static void bigmac_tx(struct bigmac *bp) { struct be_txd *txbase = &bp->bmac_block->be_txd[0]; struct net_device *dev = bp->dev; int elem; spin_lock(&bp->lock); elem = bp->tx_old; DTX(("bigmac_tx: tx_old[%d] ", elem)); while (elem != bp->tx_new) { struct sk_buff *skb; struct be_txd *this = &txbase[elem]; DTX(("this(%p) [flags(%08x)addr(%08x)]", this, this->tx_flags, this->tx_addr)); if (this->tx_flags & TXD_OWN) break; skb = bp->tx_skbs[elem]; bp->enet_stats.tx_packets++; bp->enet_stats.tx_bytes += skb->len; dma_unmap_single(&bp->bigmac_op->dev, this->tx_addr, skb->len, DMA_TO_DEVICE); DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; dev_kfree_skb_irq(skb); elem = NEXT_TX(elem); } DTX((" DONE, tx_old=%d\n", elem)); bp->tx_old = elem; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(bp) > 0) netif_wake_queue(bp->dev); spin_unlock(&bp->lock); } /* BigMAC receive complete service routines. 
 */
/* Drain the RX ring: for each completed descriptor either swap in a
 * fresh buffer (large frames) or copy into a small skb (short frames),
 * return the descriptor to the hardware, and pass the packet up.
 */
static void bigmac_rx(struct bigmac *bp)
{
	struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
	struct be_rxd *this;
	int elem = bp->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		int len = (flags & RXD_LENGTH); /* FCS not included */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			bp->enet_stats.rx_errors++;
			bp->enet_stats.rx_length_errors++;

	drop_it:
			/* Return it to the BigMAC. */
			bp->enet_stats.rx_dropped++;
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
			goto next;
		}
		skb = bp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_single(&bp->bigmac_op->dev,
					 this->rx_addr,
					 RX_BUF_ALLOC_SIZE - 34,
					 DMA_FROM_DEVICE);
			bp->rx_skbs[elem] = new_skb;
			new_skb->dev = bp->dev;
			skb_put(new_skb, ETH_FRAME_LEN);
			skb_reserve(new_skb, 34);
			this->rx_addr =
				dma_map_single(&bp->bigmac_op->dev,
					       new_skb->data,
					       RX_BUF_ALLOC_SIZE - 34,
					       DMA_FROM_DEVICE);
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}
			/* +2 reserve keeps the IP header word-aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&bp->bigmac_op->dev,
						this->rx_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
			dma_sync_single_for_device(&bp->bigmac_op->dev,
						   this->rx_addr, len,
						   DMA_FROM_DEVICE);

			/* Reuse original ring buffer. */
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		/* No checksums done by the BigMAC ;-( */
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_rx(skb);
		bp->enet_stats.rx_packets++;
		bp->enet_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	bp->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
		       bp->dev->name);
}

/* Top-level interrupt handler: latch both status registers, recover
 * from error conditions, then service TX and RX completions.
 */
static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
{
	struct bigmac *bp = (struct bigmac *) dev_id;
	u32 qec_status, bmac_status;

	DIRQ(("bigmac_interrupt: "));

	/* Latch status registers now. */
	bmac_status = sbus_readl(bp->creg + CREG_STAT);
	qec_status = sbus_readl(bp->gregs + GLOB_STAT);

	DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
	if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
	   (bmac_status & CREG_STAT_ERRORS))
		bigmac_is_medium_rare(bp, qec_status, bmac_status);

	if (bmac_status & CREG_STAT_TXIRQ)
		bigmac_tx(bp);

	if (bmac_status & CREG_STAT_RXIRQ)
		bigmac_rx(bp);

	return IRQ_HANDLED;
}

/* ndo_open: grab the (shared) IRQ line and bring the hardware up;
 * releases the IRQ again if init fails.
 */
static int bigmac_open(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
	if (ret) {
		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
		return ret;
	}
	init_timer(&bp->bigmac_timer);
	ret = bigmac_init_hw(bp, 0);
	if (ret)
		free_irq(dev->irq, bp);
	return ret;
}

/* ndo_stop: stop the link timer and hardware, drop ring buffers, and
 * release the IRQ line.
 */
static int bigmac_close(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	del_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	bigmac_stop(bp);
	bigmac_clean_rings(bp);
	free_irq(dev->irq, bp);
	return 0;
}

/* ndo_tx_timeout: the TX path wedged; re-init the chip and restart
 * the queue.
 */
static void bigmac_tx_timeout(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_init_hw(bp, 0);
	netif_wake_queue(dev);
}

/* Put a packet on the wire.
 */
/* ndo_start_xmit: map the skb for DMA, fill the next TX descriptor,
 * hand ownership to the hardware, and poke the QEC TX wakeup register.
 */
static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int len, entry;
	u32 mapping;

	len = skb->len;
	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
				 len, DMA_TO_DEVICE);

	/* Avoid a race... */
	spin_lock_irq(&bp->lock);
	entry = bp->tx_new;
	DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
	bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
	bp->tx_skbs[entry] = skb;
	bp->bmac_block->be_txd[entry].tx_addr = mapping;
	/* TXD_OWN is set last so the device only sees a complete entry. */
	bp->bmac_block->be_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	bp->tx_new = NEXT_TX(entry);
	if (TX_BUFFS_AVAIL(bp) <= 0)
		netif_stop_queue(dev);
	spin_unlock_irq(&bp->lock);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);

	return NETDEV_TX_OK;
}

/* ndo_get_stats: fold the latest hardware counters into the software
 * stats and return them.
 */
static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_get_counters(bp, bp->bregs);
	return &bp->enet_stats;
}

/* ndo_set_multicast_list: quiesce the receiver, then program either
 * an all-ones hash table (allmulti / >64 groups), promiscuous mode,
 * or a CRC-based multicast hash filter, and re-enable reception.
 */
static void bigmac_set_multicast(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	void __iomem *bregs = bp->bregs;
	struct netdev_hw_addr *ha;
	char *addrs;
	int i;
	u32 tmp, crc;

	/* Disable the receiver.  The bit self-clears when
	 * the operation is complete.
	 */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp &= ~(BIGMAC_RXCFG_ENABLE);
	sbus_writel(tmp, bregs + BMAC_RXCFG);
	while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
		udelay(20);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writel(0xffff, bregs + BMAC_HTABLE0);
		sbus_writel(0xffff, bregs + BMAC_HTABLE1);
		sbus_writel(0xffff, bregs + BMAC_HTABLE2);
		sbus_writel(0xffff, bregs + BMAC_HTABLE3);
	} else if (dev->flags & IFF_PROMISC) {
		tmp = sbus_readl(bregs + BMAC_RXCFG);
		tmp |= BIGMAC_RXCFG_PMISC;
		sbus_writel(tmp, bregs + BMAC_RXCFG);
	} else {
		u16 hash_table[4];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			/* Only multicast addresses (low bit set). */
			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
		sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
		sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
		sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
	}

	/* Re-enable the receiver. */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp |= BIGMAC_RXCFG_ENABLE;
	sbus_writel(tmp, bregs + BMAC_RXCFG);
}

/* Ethtool support...
*/ static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "sunbmac"); strcpy(info->version, "2.0"); } static u32 bigmac_get_link(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); spin_lock_irq(&bp->lock); bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR); spin_unlock_irq(&bp->lock); return (bp->sw_bmsr & BMSR_LSTATUS); } static const struct ethtool_ops bigmac_ethtool_ops = { .get_drvinfo = bigmac_get_drvinfo, .get_link = bigmac_get_link, }; static const struct net_device_ops bigmac_ops = { .ndo_open = bigmac_open, .ndo_stop = bigmac_close, .ndo_start_xmit = bigmac_start_xmit, .ndo_get_stats = bigmac_get_stats, .ndo_set_multicast_list = bigmac_set_multicast, .ndo_tx_timeout = bigmac_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit bigmac_ether_init(struct of_device *op, struct of_device *qec_op) { static int version_printed; struct net_device *dev; u8 bsizes, bsizes_more; struct bigmac *bp; int i; /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); if (!dev) return -ENOMEM; if (version_printed++ == 0) printk(KERN_INFO "%s", version); for (i = 0; i < 6; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ bp = netdev_priv(dev); bp->qec_op = qec_op; bp->bigmac_op = op; SET_NETDEV_DEV(dev, &op->dev); spin_lock_init(&bp->lock); /* Map in QEC global control registers. */ bp->gregs = of_ioremap(&qec_op->resource[0], 0, GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); if (!bp->gregs) { printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); goto fail_and_cleanup; } /* Make sure QEC is in BigMAC mode. */ if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); goto fail_and_cleanup; } /* Reset the QEC. 
*/ if (qec_global_reset(bp->gregs)) goto fail_and_cleanup; /* Get supported SBUS burst sizes. */ bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes &= 0xff; if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32) == 0) bsizes = (DMA_BURST32 - 1); bp->bigmac_bursts = bsizes; /* Perform QEC initialization. */ qec_init(bp); /* Map in the BigMAC channel registers. */ bp->creg = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "BigMAC QEC Channel Regs"); if (!bp->creg) { printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC control registers. */ bp->bregs = of_ioremap(&op->resource[1], 0, BMAC_REG_SIZE, "BigMAC Primary Regs"); if (!bp->bregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC transceiver registers, this is how you poke at * the BigMAC's PHY. */ bp->tregs = of_ioremap(&op->resource[2], 0, TCVR_REG_SIZE, "BigMAC Transceiver Regs"); if (!bp->tregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); goto fail_and_cleanup; } /* Stop the BigMAC. */ bigmac_stop(bp); /* Allocate transmit/receive descriptor DVMA block. */ bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, PAGE_SIZE, &bp->bblock_dvma, GFP_ATOMIC); if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); goto fail_and_cleanup; } /* Get the board revision of this BigMAC. */ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, "board-version", 1); /* Init auto-negotiation timer state. */ init_timer(&bp->bigmac_timer); bp->timer_state = asleep; bp->timer_ticks = 0; /* Backlink to generic net device struct. */ bp->dev = dev; /* Set links to our BigMAC open and close routines. 
*/ dev->ethtool_ops = &bigmac_ethtool_ops; dev->netdev_ops = &bigmac_ops; dev->watchdog_timeo = 5*HZ; /* Finish net device registration. */ dev->irq = bp->bigmac_op->irqs[0]; dev->dma = 0; if (register_netdev(dev)) { printk(KERN_ERR "BIGMAC: Cannot register device.\n"); goto fail_and_cleanup; } dev_set_drvdata(&bp->bigmac_op->dev, bp); printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", dev->name, dev->dev_addr); return 0; fail_and_cleanup: /* Something went wrong, undo whatever we did so far. */ /* Free register mappings if any. */ if (bp->gregs) of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); if (bp->creg) of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); if (bp->bregs) of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); if (bp->tregs) of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); if (bp->bmac_block) dma_free_coherent(&bp->bigmac_op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); /* This also frees the co-located private data */ free_netdev(dev); return -ENODEV; } /* QEC can be the parent of either QuadEthernet or a BigMAC. We want * the latter. 
*/ static int __devinit bigmac_sbus_probe(struct of_device *op, const struct of_device_id *match) { struct device *parent = op->dev.parent; struct of_device *qec_op; qec_op = to_of_device(parent); return bigmac_ether_init(op, qec_op); } static int __devexit bigmac_sbus_remove(struct of_device *op) { struct bigmac *bp = dev_get_drvdata(&op->dev); struct device *parent = op->dev.parent; struct net_device *net_dev = bp->dev; struct of_device *qec_op; qec_op = to_of_device(parent); unregister_netdev(net_dev); of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id bigmac_sbus_match[] = { { .name = "be", }, {}, }; MODULE_DEVICE_TABLE(of, bigmac_sbus_match); static struct of_platform_driver bigmac_sbus_driver = { .driver = { .name = "sunbmac", .owner = THIS_MODULE, .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, .remove = __devexit_p(bigmac_sbus_remove), }; static int __init bigmac_init(void) { return of_register_driver(&bigmac_sbus_driver, &of_bus_type); } static void __exit bigmac_exit(void) { of_unregister_driver(&bigmac_sbus_driver); } module_init(bigmac_init); module_exit(bigmac_exit);
gpl-2.0
jonathanfisher/wl18xx
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
1016
37405
/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, see <http://www.gnu.org/licenses/>.            *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

/* (Re)arm the periodic MAC statistics poll, "secs" seconds from now. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

/* SGE queue sizing limits used by the ethtool ringparam handlers below. */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES 32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* PCI bus speed labels indexed by mode; presumably paired with
 * adapter->params.pci fields elsewhere -- not referenced in this chunk.
 */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 * Delegates to the port's cmac ops with a t1_rx_mode cursor wrapping dev.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}

/* Log the current carrier state (and speed/duplex when the link is up). */
static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		netdev_info(p->dev, "link down\n");
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		netdev_info(p->dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

/*
 * Link state change callback: propagate carrier state to the stack,
 * log it, and on multi-port adapters retune the SGE per-port scheduler
 * to the negotiated speed.
 */
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

/*
 * Bring one port's MAC up: reset, program the station address, restore
 * the RX filter, kick autonegotiation, then enable both directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/* Enable TP checksum offload; IP csum is only needed when TSO is on. */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 * One-time HW module init, then per-open IRQ setup (MSI unless disabled
 * or unavailable) and SGE/interrupt enable.  Returns 0 or -errno.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);
		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}

/*
 * ndo_open: bring the adapter up on first open (cxgb_up), mark this
 * port open, start its link and TX queue, and arm the periodic MAC
 * stats poll when this is the first active port.
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}

/*
 * ndo_stop: quiesce this port; when it is the last open port also stop
 * the stats work (after synchronizing with a possibly-running instance
 * via work_lock) and tear the adapter down.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

/*
 * ndo_get_stats: translate the MAC's hardware counters into the generic
 * net_device_stats fields cached in the port_info.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;

	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}

/*
 * ethtool -S labels.  Order must match the *data++ sequence in
 * get_stats() exactly: MAC counters, per-port SGE counters, then
 * interrupt/ESPI counters.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * ethtool -S values; the emission order here defines the meaning of
 * stats_strings[] above.  ESPI counters are appended only when the
 * adapter has an ESPI block.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}

/* Copy registers [start, end] (byte offsets) into buf at offset start. */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

/* Report negotiated link settings; speed/duplex are unknown w/o carrier. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, p->link_config.speed);
		cmd->duplex = p->link_config.duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

/* Map a (speed, duplex) pair to the matching SUPPORTED_* capability bit. */
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

/*
 * Validate and apply new link settings.  Forced mode rejects
 * unsupported caps (and 1G, which this HW only autonegotiates);
 * autoneg mode sanitizes the advertising mask instead.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* more than one bit set: fall back to everything supported */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

/*
 * Set flow-control: either renegotiate (autoneg) or program the MAC
 * directly with the requested RX/TX pause settings.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

/* T1B swaps the normal/jumbo free-list indices, hence jumbo_fl. */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

/* Validate and stage new ring sizes; only allowed before full HW init. */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

/* Only ASICs expose an EEPROM; FPGA variants report none. */
static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/* Word-aligned read of the (tiny) serial EEPROM into the caller's buffer. */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
};

/* ndo_do_ioctl: forward MII ioctls to the PHY's MDIO interface. */
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

/* ndo_change_mtu: push the new MTU into the MAC before committing it. */
static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if (new_mtu < 68)
		return -EINVAL;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

/* ndo_set_mac_address: program the MAC, then mirror into dev->dev_addr. */
static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct adapter *adapter = dev->ml_priv;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t1_vlan_mode(adapter, features);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the IRQ handler directly with local IRQs off. */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}

/* Stop DMA and interrupts after an unrecoverable error; adapter stays dead. */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
	.ndo_fix_features	= t1_fix_features,
	.ndo_set_features	= t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};

/*
 * PCI probe: set up DMA masks and BAR0, allocate one netdev per port
 * (the adapter private data is co-located with port 0's netdev), wire
 * up features/ops, then register as many ports as possible -- the probe
 * only fails outright if no port registers.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first netdev carries the adapter private data. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

/*
 * Bit-bang "nbits" bits of "bitdata" (MSB first) into the clock
 * synthesizer via the ELMER0 GPO data/clock lines.
 */
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {
		udelay(50);

		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	}
}

/*
 * Switch a T1B between the high-performance (HCLOCK) and powersave
 * (LCLOCK) core/memory clock settings by serially programming the two
 * clock synthesizers.  mode bit1 set = just report; returns -EALREADY
 * if already in the requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}

/* Power-cycle the chip through PCI PM state D3->D0 to force a reset. */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

/*
 * PCI remove: unregister every successfully-registered port, tear down
 * SW modules, unmap BAR0, free all netdevs (including the one holding
 * the adapter struct), release PCI resources and reset the chip.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}

static struct pci_driver cxgb_pci_driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
};

module_pci_driver(cxgb_pci_driver);
gpl-2.0
imoseyon/leanKernel-i500-gingerbread
drivers/usb/gadget/f_eem.c
1016
14274
/* * f_eem.c -- USB CDC Ethernet (EEM) link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Copyright (C) 2009 EF Johnson Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/slab.h> #include "u_ether.h" #define EEM_HLEN 2 /* * This function is a "CDC Ethernet Emulation Model" (CDC EEM) * Ethernet link. 
*/ struct eem_ep_descs { struct usb_endpoint_descriptor *in; struct usb_endpoint_descriptor *out; }; struct f_eem { struct gether port; u8 ctrl_id; struct eem_ep_descs fs; struct eem_ep_descs hs; }; static inline struct f_eem *func_to_eem(struct usb_function *f) { return container_of(f, struct f_eem, port.func); } /*-------------------------------------------------------------------------*/ /* interface descriptor: */ static struct usb_interface_descriptor eem_intf __initdata = { .bLength = sizeof eem_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM, .bInterfaceProtocol = USB_CDC_PROTO_EEM, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *eem_fs_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_fs_in_desc, (struct usb_descriptor_header *) &eem_fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static 
struct usb_descriptor_header *eem_hs_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_hs_in_desc, (struct usb_descriptor_header *) &eem_hs_out_desc, NULL, }; /* string descriptors: */ static struct usb_string eem_string_defs[] = { [0].s = "CDC Ethernet Emulation Model (EEM)", { } /* end of list */ }; static struct usb_gadget_strings eem_string_table = { .language = 0x0409, /* en-us */ .strings = eem_string_defs, }; static struct usb_gadget_strings *eem_strings[] = { &eem_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = f->config->cdev; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* device either stalls (value < 0) or reports success */ return value; } static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_eem *eem = func_to_eem(f); struct usb_composite_dev *cdev = f->config->cdev; struct net_device *net; /* we know alt == 0, so this is an activation or a reset */ if (alt != 0) goto fail; if (intf == eem->ctrl_id) { if (eem->port.in_ep->driver_data) { DBG(cdev, "reset eem\n"); gether_disconnect(&eem->port); } if (!eem->port.in) { DBG(cdev, "init eem\n"); eem->port.in = ep_choose(cdev->gadget, eem->hs.in, eem->fs.in); eem->port.out = ep_choose(cdev->gadget, eem->hs.out, eem->fs.out); } /* zlps should not occur because zero-length EEM packets * will be inserted in those cases where they would occur */ eem->port.is_zlp_ok = 1; eem->port.cdc_filter = DEFAULT_FILTER; DBG(cdev, "activate eem\n"); net = gether_connect(&eem->port); if (IS_ERR(net)) return 
PTR_ERR(net); } else goto fail; return 0; fail: return -EINVAL; } static void eem_disable(struct usb_function *f) { struct f_eem *eem = func_to_eem(f); struct usb_composite_dev *cdev = f->config->cdev; DBG(cdev, "eem deactivated\n"); if (eem->port.in_ep->driver_data) gether_disconnect(&eem->port); } /*-------------------------------------------------------------------------*/ /* EEM function driver setup/binding */ static int __init eem_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_eem *eem = func_to_eem(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; eem->ctrl_id = status; eem_intf.bInterfaceNumber = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc); if (!ep) goto fail; eem->port.in_ep = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc); if (!ep) goto fail; eem->port.out_ep = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(eem_fs_function); if (!f->descriptors) goto fail; eem->fs.in = usb_find_endpoint(eem_fs_function, f->descriptors, &eem_fs_in_desc); eem->fs.out = usb_find_endpoint(eem_fs_function, f->descriptors, &eem_fs_out_desc); /* support all relevant hardware speeds... 
we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { eem_hs_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; eem_hs_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(eem_hs_function); if (!f->hs_descriptors) goto fail; eem->hs.in = usb_find_endpoint(eem_hs_function, f->hs_descriptors, &eem_hs_in_desc); eem->hs.out = usb_find_endpoint(eem_hs_function, f->hs_descriptors, &eem_hs_out_desc); } DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", eem->port.in_ep->name, eem->port.out_ep->name); return 0; fail: if (f->descriptors) usb_free_descriptors(f->descriptors); /* we might as well release our claims on endpoints */ if (eem->port.out) eem->port.out_ep->driver_data = NULL; if (eem->port.in) eem->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static void eem_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_eem *eem = func_to_eem(f); DBG(c->cdev, "eem unbind\n"); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(eem); } static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) { } /* * Add the EEM header and ethernet checksum. * We currently do not attempt to put multiple ethernet frames * into a single USB transfer */ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb) { struct sk_buff *skb2 = NULL; struct usb_ep *in = port->in_ep; int padlen = 0; u16 len = skb->len; if (!skb_cloned(skb)) { int headroom = skb_headroom(skb); int tailroom = skb_tailroom(skb); /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, * stick two bytes of zero-length EEM packet on the end. 
*/ if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0) padlen += 2; if ((tailroom >= (ETH_FCS_LEN + padlen)) && (headroom >= EEM_HLEN)) goto done; } skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return skb; done: /* use the "no CRC" option */ put_unaligned_be32(0xdeadbeef, skb_put(skb, 4)); /* EEM packet header format: * b0..13: length of ethernet frame * b14: bmCRC (0 == sentinel CRC) * b15: bmType (0 == data) */ len = skb->len; put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); /* add a zero-length EEM packet, if needed */ if (padlen) put_unaligned_le16(0, skb_put(skb, 2)); return skb; } /* * Remove the EEM header. Note that there can be many EEM packets in a single * USB transfer, so we need to break them out and handle them independently. */ static int eem_unwrap(struct gether *port, struct sk_buff *skb, struct sk_buff_head *list) { struct usb_composite_dev *cdev = port->func.config->cdev; int status = 0; do { struct sk_buff *skb2; u16 header; u16 len = 0; if (skb->len < EEM_HLEN) { status = -EINVAL; DBG(cdev, "invalid EEM header\n"); goto error; } /* remove the EEM header */ header = get_unaligned_le16(skb->data); skb_pull(skb, EEM_HLEN); /* EEM packet header format: * b0..14: EEM type dependent (data or command) * b15: bmType (0 == data, 1 == command) */ if (header & BIT(15)) { struct usb_request *req = cdev->req; u16 bmEEMCmd; /* EEM command packet format: * b0..10: bmEEMCmdParam * b11..13: bmEEMCmd * b14: reserved (must be zero) * b15: bmType (1 == command) */ if (header & BIT(14)) continue; bmEEMCmd = (header >> 11) & 0x7; switch (bmEEMCmd) { case 0: /* echo */ len = header & 0x7FF; if (skb->len < len) { status = -EOVERFLOW; goto error; } skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "EEM echo response error\n"); goto next; } skb_trim(skb2, len); put_unaligned_le16(BIT(15) | BIT(11) | len, skb_push(skb2, 2)); skb_copy_bits(skb, 0, req->buf, skb->len); 
req->length = skb->len; req->complete = eem_cmd_complete; req->zero = 1; if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) DBG(cdev, "echo response queue fail\n"); break; case 1: /* echo response */ case 2: /* suspend hint */ case 3: /* response hint */ case 4: /* response complete hint */ case 5: /* tickle */ default: /* reserved */ continue; } } else { u32 crc, crc2; struct sk_buff *skb3; /* check for zero-length EEM packet */ if (header == 0) continue; /* EEM data packet format: * b0..13: length of ethernet frame * b14: bmCRC (0 == sentinel, 1 == calculated) * b15: bmType (0 == data) */ len = header & 0x3FFF; if ((skb->len < len) || (len < (ETH_HLEN + ETH_FCS_LEN))) { status = -EINVAL; goto error; } /* validate CRC */ if (header & BIT(14)) { crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); crc2 = ~crc32_le(~0, skb->data, len - ETH_FCS_LEN); } else { crc = get_unaligned_be32(skb->data + len - ETH_FCS_LEN); crc2 = 0xdeadbeef; } if (crc != crc2) { DBG(cdev, "invalid EEM CRC\n"); goto next; } skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "unable to unframe EEM packet\n"); continue; } skb_trim(skb2, len - ETH_FCS_LEN); skb3 = skb_copy_expand(skb2, NET_IP_ALIGN, 0, GFP_ATOMIC); if (unlikely(!skb3)) { DBG(cdev, "unable to realign EEM packet\n"); dev_kfree_skb_any(skb2); continue; } dev_kfree_skb_any(skb2); skb_queue_tail(list, skb3); } next: skb_pull(skb, len); } while (skb->len); error: dev_kfree_skb_any(skb); return status; } /** * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration * @c: the configuration to support the network link * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gether_setup(). Caller is also responsible * for calling @gether_cleanup() before module unload. 
*/ int __init eem_bind_config(struct usb_configuration *c) { struct f_eem *eem; int status; /* maybe allocate device-global string IDs */ if (eem_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; eem_string_defs[0].id = status; eem_intf.iInterface = status; } /* allocate and initialize one new instance */ eem = kzalloc(sizeof *eem, GFP_KERNEL); if (!eem) return -ENOMEM; eem->port.cdc_filter = DEFAULT_FILTER; eem->port.func.name = "cdc_eem"; eem->port.func.strings = eem_strings; /* descriptors are per-instance copies */ eem->port.func.bind = eem_bind; eem->port.func.unbind = eem_unbind; eem->port.func.set_alt = eem_set_alt; eem->port.func.setup = eem_setup; eem->port.func.disable = eem_disable; eem->port.wrap = eem_wrap; eem->port.unwrap = eem_unwrap; eem->port.header_len = EEM_HLEN; status = usb_add_function(c, &eem->port.func); if (status) kfree(eem); return status; }
gpl-2.0
ion-storm/Unleashed-N4
drivers/video/msm/mdss/mdss_dsi_panel.c
1272
10590
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/slab.h> #include "mdss_dsi.h" #define DT_CMD_HDR 6 static struct dsi_buf dsi_panel_tx_buf; static struct dsi_buf dsi_panel_rx_buf; static struct dsi_cmd_desc *dsi_panel_on_cmds; static struct dsi_cmd_desc *dsi_panel_off_cmds; static int num_of_on_cmds; static int num_of_off_cmds; static char *on_cmds, *off_cmds; static int mdss_dsi_panel_on(struct mdss_panel_data *pdata) { struct mipi_panel_info *mipi; mipi = &pdata->panel_info.mipi; pr_debug("%s:%d, debug info (mode) : %d\n", __func__, __LINE__, mipi->mode); if (mipi->mode == DSI_VIDEO_MODE) { mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_on_cmds, num_of_on_cmds); } else { pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__); return -EINVAL; } return 0; } static int mdss_dsi_panel_off(struct mdss_panel_data *pdata) { struct mipi_panel_info *mipi; mipi = &pdata->panel_info.mipi; pr_debug("%s:%d, debug info\n", __func__, __LINE__); if (mipi->mode == DSI_VIDEO_MODE) { mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_off_cmds, num_of_off_cmds); } else { pr_debug("%s:%d, CMD mode not supported", __func__, __LINE__); return -EINVAL; } return 0; } static int mdss_panel_parse_dt(struct platform_device *pdev, struct mdss_panel_common_pdata *panel_data) { struct device_node *np = pdev->dev.of_node; u32 res[6], tmp; int rc, i, len; int cmd_plen, data_offset; const char *data; rc = 
of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2); if (rc) { pr_err("%s:%d, panel resolution not specified\n", __func__, __LINE__); return -EINVAL; } panel_data->panel_info.xres = (!rc ? res[0] : 640); panel_data->panel_info.yres = (!rc ? res[1] : 480); rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp); if (rc) { pr_err("%s:%d, panel bpp not specified\n", __func__, __LINE__); return -EINVAL; } panel_data->panel_info.bpp = (!rc ? tmp : 24); rc = of_property_read_u32_array(np, "qcom,mdss-pan-porch-values", res, 6); panel_data->panel_info.lcdc.h_back_porch = (!rc ? res[0] : 6); panel_data->panel_info.lcdc.h_pulse_width = (!rc ? res[1] : 2); panel_data->panel_info.lcdc.h_front_porch = (!rc ? res[2] : 6); panel_data->panel_info.lcdc.v_back_porch = (!rc ? res[3] : 6); panel_data->panel_info.lcdc.v_pulse_width = (!rc ? res[4] : 2); panel_data->panel_info.lcdc.v_front_porch = (!rc ? res[5] : 6); rc = of_property_read_u32(np, "qcom,mdss-pan-underflow-clr", &tmp); panel_data->panel_info.lcdc.underflow_clr = (!rc ? tmp : 0xff); rc = of_property_read_u32_array(np, "qcom,mdss-pan-bl-levels", res, 2); panel_data->panel_info.bl_min = (!rc ? res[0] : 0); panel_data->panel_info.bl_max = (!rc ? res[1] : 255); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp); panel_data->panel_info.mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-h-pulse-mode", &tmp); panel_data->panel_info.mipi.pulse_mode_hsa_he = (!rc ? tmp : false); rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-h-power-stop", res, 3); panel_data->panel_info.mipi.hbp_power_stop = (!rc ? res[0] : false); panel_data->panel_info.mipi.hsa_power_stop = (!rc ? res[1] : false); panel_data->panel_info.mipi.hfp_power_stop = (!rc ? res[2] : false); rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-bllp-power-stop", res, 2); panel_data->panel_info.mipi.bllp_power_stop = (!rc ? res[0] : false); panel_data->panel_info.mipi.eof_bllp_power_stop = (!rc ? 
res[1] : false); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-traffic-mode", &tmp); panel_data->panel_info.mipi.traffic_mode = (!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dst-format", &tmp); panel_data->panel_info.mipi.dst_format = (!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-vc", &tmp); panel_data->panel_info.mipi.vc = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-rgb-swap", &tmp); panel_data->panel_info.mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB); rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-data-lanes", res, 4); panel_data->panel_info.mipi.data_lane0 = (!rc ? res[0] : true); panel_data->panel_info.mipi.data_lane1 = (!rc ? res[1] : false); panel_data->panel_info.mipi.data_lane2 = (!rc ? res[2] : false); panel_data->panel_info.mipi.data_lane3 = (!rc ? res[3] : false); rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2); panel_data->panel_info.mipi.t_clk_pre = (!rc ? res[0] : 0x24); panel_data->panel_info.mipi.t_clk_post = (!rc ? res[1] : 0x03); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-stream", &tmp); panel_data->panel_info.mipi.stream = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mdp-tr", &tmp); panel_data->panel_info.mipi.mdp_trigger = (!rc ? tmp : DSI_CMD_TRIGGER_SW); if (panel_data->panel_info.mipi.mdp_trigger > 6) { pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger", __func__, __LINE__); panel_data->panel_info.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; } rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dma-tr", &tmp); panel_data->panel_info.mipi.dma_trigger = (!rc ? tmp : DSI_CMD_TRIGGER_SW); if (panel_data->panel_info.mipi.dma_trigger > 6) { pr_err("%s:%d, Invalid dma trigger. 
Forcing to sw trigger", __func__, __LINE__); panel_data->panel_info.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; } rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp); panel_data->panel_info.mipi.frame_rate = (!rc ? tmp : 60); data = of_get_property(np, "qcom,panel-on-cmds", &len); if (!data) { pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__); goto error; } on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL); if (!on_cmds) return -ENOMEM; memcpy(on_cmds, data, len); data_offset = 0; cmd_plen = 0; while ((len - data_offset) >= DT_CMD_HDR) { data_offset += (DT_CMD_HDR - 1); cmd_plen = on_cmds[data_offset++]; data_offset += cmd_plen; num_of_on_cmds++; } if (!num_of_on_cmds) { pr_err("%s:%d, No ON cmds specified", __func__, __LINE__); goto error; } dsi_panel_on_cmds = kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)), GFP_KERNEL); if (!dsi_panel_on_cmds) return -ENOMEM; data_offset = 0; for (i = 0; i < num_of_on_cmds; i++) { dsi_panel_on_cmds[i].dtype = on_cmds[data_offset++]; dsi_panel_on_cmds[i].last = on_cmds[data_offset++]; dsi_panel_on_cmds[i].vc = on_cmds[data_offset++]; dsi_panel_on_cmds[i].ack = on_cmds[data_offset++]; dsi_panel_on_cmds[i].wait = on_cmds[data_offset++]; dsi_panel_on_cmds[i].dlen = on_cmds[data_offset++]; dsi_panel_on_cmds[i].payload = &on_cmds[data_offset]; data_offset += (dsi_panel_on_cmds[i].dlen); } if (data_offset != len) { pr_err("%s:%d, Incorrect ON command entries", __func__, __LINE__); goto error; } data = of_get_property(np, "qcom,panel-off-cmds", &len); if (!data) { pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__); goto error; } off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL); if (!off_cmds) return -ENOMEM; memcpy(off_cmds, data, len); data_offset = 0; cmd_plen = 0; while ((len - data_offset) >= DT_CMD_HDR) { data_offset += (DT_CMD_HDR - 1); cmd_plen = off_cmds[data_offset++]; data_offset += cmd_plen; num_of_off_cmds++; } if (!num_of_off_cmds) { pr_err("%s:%d, No OFF cmds specified", __func__, 
__LINE__); goto error; } dsi_panel_off_cmds = kzalloc(num_of_off_cmds * sizeof(struct dsi_cmd_desc), GFP_KERNEL); if (!dsi_panel_off_cmds) return -ENOMEM; data_offset = 0; for (i = 0; i < num_of_off_cmds; i++) { dsi_panel_off_cmds[i].dtype = off_cmds[data_offset++]; dsi_panel_off_cmds[i].last = off_cmds[data_offset++]; dsi_panel_off_cmds[i].vc = off_cmds[data_offset++]; dsi_panel_off_cmds[i].ack = off_cmds[data_offset++]; dsi_panel_off_cmds[i].wait = off_cmds[data_offset++]; dsi_panel_off_cmds[i].dlen = off_cmds[data_offset++]; dsi_panel_off_cmds[i].payload = &off_cmds[data_offset]; data_offset += (dsi_panel_off_cmds[i].dlen); } if (data_offset != len) { pr_err("%s:%d, Incorrect OFF command entries", __func__, __LINE__); goto error; } return 0; error: kfree(dsi_panel_on_cmds); kfree(dsi_panel_off_cmds); kfree(on_cmds); kfree(off_cmds); return -EINVAL; } static int __devinit mdss_dsi_panel_probe(struct platform_device *pdev) { int rc = 0; struct mdss_panel_common_pdata *vendor_pdata = NULL; static const char *panel_name; if (pdev->dev.parent == NULL) { pr_err("%s: parent device missing\n", __func__); return -ENODEV; } pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id); if (!pdev->dev.of_node) return -ENODEV; panel_name = of_get_property(pdev->dev.of_node, "label", NULL); if (!panel_name) pr_info("%s:%d, panel name not specified\n", __func__, __LINE__); else pr_info("%s: Panel Name = %s\n", __func__, panel_name); vendor_pdata = devm_kzalloc(&pdev->dev, sizeof(*vendor_pdata), GFP_KERNEL); if (!vendor_pdata) return -ENOMEM; rc = mdss_panel_parse_dt(pdev, vendor_pdata); if (rc) { devm_kfree(&pdev->dev, vendor_pdata); vendor_pdata = NULL; return rc; } vendor_pdata->on = mdss_dsi_panel_on; vendor_pdata->off = mdss_dsi_panel_off; rc = dsi_panel_device_register(pdev, vendor_pdata); if (rc) return rc; return 0; } static const struct of_device_id mdss_dsi_panel_match[] = { {.compatible = "qcom,mdss-dsi-panel"}, {} }; static struct platform_driver this_driver = 
{ .probe = mdss_dsi_panel_probe, .driver = { .name = "dsi_panel", .of_match_table = mdss_dsi_panel_match, }, }; static int __init mdss_dsi_panel_init(void) { mdss_dsi_buf_alloc(&dsi_panel_tx_buf, DSI_BUF_SIZE); mdss_dsi_buf_alloc(&dsi_panel_rx_buf, DSI_BUF_SIZE); return platform_driver_register(&this_driver); } module_init(mdss_dsi_panel_init);
gpl-2.0
jrior001/evitaul-3.4.100-HTC
drivers/infiniband/hw/cxgb4/cm.c
2040
72106
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/list.h> #include <linux/workqueue.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <linux/notifier.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <net/neighbour.h> #include <net/netevent.h> #include <net/route.h> #include "iw_cxgb4.h" static char *states[] = { "idle", "listen", "connecting", "mpa_wait_req", "mpa_req_sent", "mpa_req_rcvd", "mpa_rep_sent", "fpdu_mode", "aborting", "closing", "moribund", "dead", NULL, }; static int dack_mode = 1; module_param(dack_mode, int, 0644); MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); int c4iw_max_read_depth = 8; module_param(c4iw_max_read_depth, int, 0644); MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); static int enable_tcp_timestamps; module_param(enable_tcp_timestamps, int, 0644); MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); static int enable_tcp_sack; module_param(enable_tcp_sack, int, 0644); MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); static int enable_tcp_window_scaling = 1; module_param(enable_tcp_window_scaling, int, 0644); MODULE_PARM_DESC(enable_tcp_window_scaling, "Enable tcp window scaling (default=1)"); int c4iw_debug; module_param(c4iw_debug, int, 0644); MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); static int peer2peer; module_param(peer2peer, int, 0644); MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; module_param(p2p_type, int, 0644); MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " "1=RDMA_READ 0=RDMA_WRITE (default 1)"); static int ep_timeout_secs = 60; module_param(ep_timeout_secs, int, 0644); MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " "in seconds (default=60)"); static int mpa_rev = 1; module_param(mpa_rev, int, 0644); MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports 
amso1100, " "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" " compliant (default=1)"); static int markers_enabled; module_param(markers_enabled, int, 0644); MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); static int crc_enabled = 1; module_param(crc_enabled, int, 0644); MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); static int rcv_win = 256 * 1024; module_param(rcv_win, int, 0644); MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); static int snd_win = 128 * 1024; module_param(snd_win, int, 0644); MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); static struct workqueue_struct *workq; static struct sk_buff_head rxq; static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); static void ep_timeout(unsigned long arg); static void connect_reply_upcall(struct c4iw_ep *ep, int status); static LIST_HEAD(timeout_list); static spinlock_t timeout_lock; static void start_ep_timer(struct c4iw_ep *ep) { PDBG("%s ep %p\n", __func__, ep); if (timer_pending(&ep->timer)) { PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); del_timer_sync(&ep->timer); } else c4iw_get_ep(&ep->com); ep->timer.expires = jiffies + ep_timeout_secs * HZ; ep->timer.data = (unsigned long)ep; ep->timer.function = ep_timeout; add_timer(&ep->timer); } static void stop_ep_timer(struct c4iw_ep *ep) { PDBG("%s ep %p\n", __func__, ep); if (!timer_pending(&ep->timer)) { printk(KERN_ERR "%s timer stopped when its not running! " "ep %p state %u\n", __func__, ep, ep->com.state); WARN_ON(1); return; } del_timer_sync(&ep->timer); c4iw_put_ep(&ep->com); } static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, struct l2t_entry *l2e) { int error = 0; if (c4iw_fatal_error(rdev)) { kfree_skb(skb); PDBG("%s - device in error state - dropping\n", __func__); return -EIO; } error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); if (error < 0) kfree_skb(skb); return error < 0 ? 
error : 0; } int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) { int error = 0; if (c4iw_fatal_error(rdev)) { kfree_skb(skb); PDBG("%s - device in error state - dropping\n", __func__); return -EIO; } error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); if (error < 0) kfree_skb(skb); return error < 0 ? error : 0; } static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) { struct cpl_tid_release *req; skb = get_skb(skb, sizeof *req, GFP_KERNEL); if (!skb) return; req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); INIT_TP_WR(req, hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); c4iw_ofld_send(rdev, skb); return; } static void set_emss(struct c4iw_ep *ep, u16 opt) { ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; ep->mss = ep->emss; if (GET_TCPOPT_TSTAMP(opt)) ep->emss -= 12; if (ep->emss < 128) ep->emss = 128; PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), ep->mss, ep->emss); } static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) { enum c4iw_ep_state state; mutex_lock(&epc->mutex); state = epc->state; mutex_unlock(&epc->mutex); return state; } static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) { epc->state = new; } static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) { mutex_lock(&epc->mutex); PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); __state_set(epc, new); mutex_unlock(&epc->mutex); return; } static void *alloc_ep(int size, gfp_t gfp) { struct c4iw_ep_common *epc; epc = kzalloc(size, gfp); if (epc) { kref_init(&epc->kref); mutex_init(&epc->mutex); c4iw_init_wr_wait(&epc->wr_wait); } PDBG("%s alloc ep %p\n", __func__, epc); return epc; } void _c4iw_free_ep(struct kref *kref) { struct c4iw_ep *ep; ep = container_of(kref, struct c4iw_ep, com.kref); PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); if 
(test_bit(RELEASE_RESOURCES, &ep->com.flags)) { cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); } kfree(ep); } static void release_ep_resources(struct c4iw_ep *ep) { set_bit(RELEASE_RESOURCES, &ep->com.flags); c4iw_put_ep(&ep->com); } static int status2errno(int status) { switch (status) { case CPL_ERR_NONE: return 0; case CPL_ERR_CONN_RESET: return -ECONNRESET; case CPL_ERR_ARP_MISS: return -EHOSTUNREACH; case CPL_ERR_CONN_TIMEDOUT: return -ETIMEDOUT; case CPL_ERR_TCAM_FULL: return -ENOMEM; case CPL_ERR_CONN_EXIST: return -EADDRINUSE; default: return -EIO; } } /* * Try and reuse skbs already allocated... */ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) { if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { skb_trim(skb, 0); skb_get(skb); skb_reset_transport_header(skb); } else { skb = alloc_skb(len, gfp); } return skb; } static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, __be32 peer_ip, __be16 local_port, __be16 peer_port, u8 tos) { struct rtable *rt; struct flowi4 fl4; rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, peer_port, local_port, IPPROTO_TCP, tos, 0); if (IS_ERR(rt)) return NULL; return rt; } static void arp_failure_discard(void *handle, struct sk_buff *skb) { PDBG("%s c4iw_dev %p\n", __func__, handle); kfree_skb(skb); } /* * Handle an ARP failure for an active open. */ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) { printk(KERN_ERR MOD "ARP failure duing connect\n"); kfree_skb(skb); } /* * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant * and send it along. 
*/ static void abort_arp_failure(void *handle, struct sk_buff *skb) { struct c4iw_rdev *rdev = handle; struct cpl_abort_req *req = cplhdr(skb); PDBG("%s rdev %p\n", __func__, rdev); req->cmd = CPL_ABORT_NO_RST; c4iw_ofld_send(rdev, skb); } static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) { unsigned int flowclen = 80; struct fw_flowc_wr *flowc; int i; skb = get_skb(skb, flowclen, GFP_KERNEL); flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 16)) | FW_WR_FLOWID(ep->hwtid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; flowc->mnemval[6].val = cpu_to_be32(snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = cpu_to_be32(ep->emss); /* Pad WR to 16 byte boundary */ flowc->mnemval[8].mnemonic = 0; flowc->mnemval[8].val = 0; for (i = 0; i < 9; i++) { flowc->mnemval[i].r4[0] = 0; flowc->mnemval[i].r4[1] = 0; flowc->mnemval[i].r4[2] = 0; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); c4iw_ofld_send(&ep->com.dev->rdev, skb); } static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) { struct cpl_close_con_req *req; struct sk_buff *skb; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); skb = get_skb(NULL, 
wrlen, gfp); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); req = (struct cpl_close_con_req *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) { struct cpl_abort_req *req; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); skb = get_skb(skb, wrlen, gfp); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb.\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); req = (struct cpl_abort_req *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); req->cmd = CPL_ABORT_SEND_RST; return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_connect(struct c4iw_ep *ep) { struct cpl_act_open_req *req; struct sk_buff *skb; u64 opt0; u32 opt2; unsigned int mtu_idx; int wscale; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb.\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); wscale = compute_wscale(rcv_win); opt0 = KEEP_ALIVE(1) | DELACK(1) | WND_SCALE(wscale) | MSS_IDX(mtu_idx) | L2T_IDX(ep->l2t->idx) | TX_CHAN(ep->tx_chan) | SMAC_SEL(ep->smac_idx) | DSCP(ep->tos) | ULP_MODE(ULP_MODE_TCPDDP) | RCV_BUFSIZ(rcv_win>>10); opt2 = RX_CHANNEL(0) | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); if (enable_tcp_timestamps) opt2 |= 
TSTAMPS_EN(1); if (enable_tcp_sack) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN(1); t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); req = (struct cpl_act_open_req *) skb_put(skb, wrlen); INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32( MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); req->local_port = ep->com.local_addr.sin_port; req->peer_port = ep->com.remote_addr.sin_port; req->local_ip = ep->com.local_addr.sin_addr.s_addr; req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); req->params = 0; req->opt2 = cpu_to_be32(opt2); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, u8 mpa_rev_to_use) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct mpa_v2_conn_params mpa_v2_params; PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); BUG_ON(skb_cloned(skb)); mpalen = sizeof(*mpa) + ep->plen; if (mpa_rev_to_use == 2) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof *req, 16); skb = get_skb(skb, wrlen, GFP_KERNEL); if (!skb) { connect_reply_upcall(ep, -ENOMEM); return; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH(1) | FW_OFLD_TX_DATA_WR_SHOVE(1)); mpa = (struct mpa_message *)(req + 1); memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); mpa->flags = (crc_enabled ? MPA_CRC : 0) | (markers_enabled ? MPA_MARKERS : 0) | (mpa_rev_to_use == 2 ? 
MPA_ENHANCED_RDMA_CONN : 0); mpa->private_data_size = htons(ep->plen); mpa->revision = mpa_rev_to_use; if (mpa_rev_to_use == 1) { ep->tried_with_mpa_v1 = 1; ep->retry_with_mpa_v1 = 0; } if (mpa_rev_to_use == 2) { mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), ep->mpa_pkt + sizeof(*mpa), ep->plen); } else if (ep->plen) memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); /* * Reference the mpa skb. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); BUG_ON(ep->mpa_skb); ep->mpa_skb = skb; c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); start_ep_timer(ep); state_set(&ep->com, MPA_REQ_SENT); ep->mpa_attr.initiator = 1; return; } static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct sk_buff *skb; struct mpa_v2_conn_params mpa_v2_params; PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof *req, 16); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH(1) | FW_OFLD_TX_DATA_WR_SHOVE(1)); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = MPA_REJECT; mpa->revision = mpa_rev; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons(((u16)ep->ird) | (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0)); mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ? MPA_V2_RDMA_WRITE_RTR : p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ ? 
MPA_V2_RDMA_READ_RTR : 0) : 0)); memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); /* * Reference the mpa skb again. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. */ skb_get(skb); set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); BUG_ON(ep->mpa_skb); ep->mpa_skb = skb; return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct sk_buff *skb; struct mpa_v2_conn_params mpa_v2_params; PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof *req, 16); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH(1) | FW_OFLD_TX_DATA_WR_SHOVE(1)); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | (markers_enabled ? 
MPA_MARKERS : 0); mpa->revision = ep->mpa_attr.version; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer && (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED)) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); /* * Reference the mpa skb. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); ep->mpa_skb = skb; state_set(&ep->com, MPA_REP_SENT); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_act_establish *req = cplhdr(skb); unsigned int tid = GET_TID(req); unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_atid(t, atid); PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); dst_confirm(ep->dst); /* setup the hwtid for this connection */ ep->hwtid = tid; cxgb4_insert_tid(t, ep, tid); ep->snd_seq = be32_to_cpu(req->snd_isn); ep->rcv_seq = be32_to_cpu(req->rcv_isn); set_emss(ep, ntohs(req->tcp_opt)); /* dealloc the atid */ cxgb4_free_atid(t, atid); /* start MPA negotiation */ send_flowc(ep, NULL); if (ep->retry_with_mpa_v1) send_mpa_req(ep, skb, 1); else send_mpa_req(ep, skb, mpa_rev); return 0; } static void close_complete_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; if (ep->com.cm_id) { PDBG("close complete delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); ep->com.cm_id->rem_ref(ep->com.cm_id); ep->com.cm_id = NULL; ep->com.qp = NULL; } } static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) { PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); close_complete_upcall(ep); state_set(&ep->com, ABORTING); return send_abort(ep, skb, gfp); } static void peer_close_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_DISCONNECT; if (ep->com.cm_id) { PDBG("peer close delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, 
ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

/*
 * Deliver IW_CM_EVENT_CLOSE with -ECONNRESET after a peer abort and
 * drop our cm_id reference.
 */
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

/*
 * Deliver IW_CM_EVENT_CONNECT_REPLY to the ULP.  For success or
 * -ECONNREFUSED the accumulated MPA private data is attached, skipping
 * the v2 connection-parameter header when MPA v2 was negotiated.  On
 * failure (status < 0) the cm_id reference is dropped.
 */
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

/*
 * Deliver IW_CM_EVENT_CONNECT_REQUEST to the listening endpoint's ULP,
 * then drop this child's reference on (and pointer to) the parent.
 */
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

/* Deliver IW_CM_EVENT_ESTABLISHED with the negotiated ird/ord. */
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

/*
 * Return the given number of RX credits to hardware via a
 * CPL_RX_DATA_ACK.  Returns the credits returned, or 0 if the
 * work-request skb could not be allocated.
 */
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}
	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

/*
 * Process streaming-mode data received while in MPA_REQ_SENT: the
 * peer's MPA start reply.  Accumulates partial data across calls in
 * ep->mpa_pkt; once the full reply (header + private data) is present,
 * validates it, records the negotiated MPA attributes (v1 or v2), moves
 * the QP to RTS, and delivers the connect reply upcall.  Any protocol
 * violation aborts the connection via the err: path.
 */
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	/*
	 * NOTE(review): bitwise '|' here, not '||' - due to precedence this
	 * evaluates as ((flags & MPA_CRC) | crc_enabled) ? 1 : 0, i.e. CRC is
	 * on if EITHER side wants it.  Looks intentional but worth confirming.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
			(ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

/*
 * Process streaming-mode data received while in MPA_REQ_WAIT: the
 * peer's MPA start request on a passive connection.  Accumulates and
 * validates the request exactly like process_mpa_reply(), records the
 * negotiated attributes, then moves to MPA_REQ_RCVD and delivers
 * IW_CM_EVENT_CONNECT_REQUEST.  Protocol violations abort the
 * connection instead of using an error exit path.
 */
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	/* NOTE(review): bitwise '|' as in process_mpa_reply() - see note there */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
							MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
							MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

/*
 * CPL_RX_DATA handler: streaming-mode TCP payload.  Advances rcv_seq,
 * returns RX credits, and dispatches the payload to the MPA request or
 * reply parser according to the endpoint state.
 */
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}

/*
 * CPL_ABORT_RPL_RSS handler: our abort completed.  Move ABORTING
 * endpoints to DEAD and release their resources.
 */
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		     __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/*
 * CPL_ACT_OPEN_RPL handler: the active open failed (or hit negative
 * advice).  Reports the error to the ULP and tears down the endpoint,
 * freeing the TID only if hardware actually allocated one.
 */
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
			atid);
		return 0;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

/*
 * CPL_PASS_OPEN_RPL handler: wake the thread waiting in listen setup
 * with the translated status.
 */
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

	return 0;
}

/*
 * Send a CPL_CLOSE_LISTSRV_REQ to tear down a listening server TID.
 * Returns 0 or a negative errno; completion arrives via
 * close_listsrv_rpl().
 */
static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

/*
 * CPL_CLOSE_LISTSRV_RPL handler: wake the thread waiting in
 * listen_stop().  NOTE(review): unlike pass_open_rpl(), the
 * lookup_stid() result is not NULL-checked here - confirm the stid is
 * guaranteed valid on this path.
 */
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}

/*
 * Send a CPL_PASS_ACCEPT_RPL accepting the incoming connection,
 * reusing (and re-referencing) the request skb for the reply.
 */
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
				      ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

/*
 * Reject an incoming connection by releasing its hardware TID,
 * reusing the request skb for the CPL_TID_RELEASE.
 */
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

/*
 * Extract the IPv4/TCP 4-tuple from the headers that the hardware
 * appends after the CPL_PASS_ACCEPT_REQ.  All outputs are left in
 * network byte order.
 */
static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}

/*
 * Resolve the neighbour for dst and populate the endpoint's transmit
 * resources (L2T entry, MTU, channel, SMAC index, TX/RSS queue ids),
 * handling the loopback case via the peer's net_device.  Returns 0 on
 * success or a negative errno.
 */
static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
		     struct c4iw_dev *cdev, bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;

	n = dst_neigh_lookup(dst, &peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		struct net_device *pdev;

		/* NOTE(review): ip_dev_find() result is not NULL-checked
		 * before the pdev dereferences below - confirm this cannot
		 * fail on the loopback path. */
		pdev = ip_dev_find(&init_net, peer_ip);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, n->dev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(n->dev) * step];

		if (clear_mpa_v1) {
			ep->retry_with_mpa_v1 = 0;
			ep->tried_with_mpa_v1 = 0;
		}
	}
	err = 0;
out:
	rcu_read_unlock();
	neigh_release(n);

	return err;
}

/*
 * CPL_PASS_ACCEPT_REQ handler: a SYN arrived on a listening server.
 * Finds a route, allocates and populates a child endpoint, inserts its
 * TID and accepts the connection; on any failure the TID is released
 * via reject_cr().  NOTE(review): the lookup_stid() result (parent_ep)
 * is used without a NULL check - confirm the stid is always valid here.
 */
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	int err;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}

/*
 * CPL_PASS_ESTABLISH handler: the passive connection's 3-way handshake
 * completed.  (Definition continues past this chunk.)
 */
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep,
ntohs(req->tcp_opt)); dst_confirm(ep->dst); state_set(&ep->com, MPA_REQ_WAIT); start_ep_timer(ep); send_flowc(ep, skb); return 0; } static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_peer_close *hdr = cplhdr(skb); struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; int disconnect = 1; int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(hdr); int ret; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); dst_confirm(ep->dst); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case MPA_REQ_WAIT: __state_set(&ep->com, CLOSING); break; case MPA_REQ_SENT: __state_set(&ep->com, CLOSING); connect_reply_upcall(ep, -ECONNRESET); break; case MPA_REQ_RCVD: /* * We're gonna mark this puppy DEAD, but keep * the reference on it until the ULP accepts or * rejects the CR. Also wake up anyone waiting * in rdma connection migration (see c4iw_accept_cr()). */ __state_set(&ep->com, CLOSING); PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case FPDU_MODE: start_ep_timer(ep); __state_set(&ep->com, CLOSING); attrs.next_state = C4IW_QP_STATE_CLOSING; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret != -ECONNRESET) { peer_close_upcall(ep); disconnect = 1; } break; case ABORTING: disconnect = 0; break; case CLOSING: __state_set(&ep->com, MORIBUND); disconnect = 0; break; case MORIBUND: stop_ep_timer(ep); if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep); __state_set(&ep->com, DEAD); release = 1; disconnect = 0; break; case DEAD: disconnect = 0; break; default: BUG_ON(1); } mutex_unlock(&ep->com.mutex); if (disconnect) 
c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) release_ep_resources(ep); return 0; } /* * Returns whether an ABORT_REQ_RSS message is a negative advice. */ static int is_neg_adv_abort(unsigned int status) { return status == CPL_ERR_RTX_NEG_ADVICE || status == CPL_ERR_PERSIST_NEG_ADVICE; } static int c4iw_reconnect(struct c4iw_ep *ep) { struct rtable *rt; int err = 0; PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); init_timer(&ep->timer); /* * Allocate an active TID to initiate a TCP connection. */ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); if (ep->atid == -1) { printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); err = -ENOMEM; goto fail2; } /* find a route */ rt = find_route(ep->com.dev, ep->com.cm_id->local_addr.sin_addr.s_addr, ep->com.cm_id->remote_addr.sin_addr.s_addr, ep->com.cm_id->local_addr.sin_port, ep->com.cm_id->remote_addr.sin_port, 0); if (!rt) { printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); err = -EHOSTUNREACH; goto fail3; } ep->dst = &rt->dst; err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, ep->dst, ep->com.dev, false); if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); goto fail4; } PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->l2t->idx); state_set(&ep->com, CONNECTING); ep->tos = 0; /* send connect request to rnic */ err = send_connect(ep); if (!err) goto out; cxgb4_l2t_release(ep->l2t); fail4: dst_release(ep->dst); fail3: cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); fail2: /* * remember to send notification to upper layer. * We are in here so the upper layer is not aware that this is * re-connect attempt and so, upper layer is still waiting for * response of 1st connect request. 
*/ connect_reply_upcall(ep, -ECONNRESET); c4iw_put_ep(&ep->com); out: return err; } static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss *req = cplhdr(skb); struct c4iw_ep *ep; struct cpl_abort_rpl *rpl; struct sk_buff *rpl_skb; struct c4iw_qp_attributes attrs; int ret; int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); return 0; } PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); /* * Wake up any threads in rdma_init() or rdma_fini(). * However, this is not needed if com state is just * MPA_REQ_SENT */ if (ep->com.state != MPA_REQ_SENT) c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case CONNECTING: break; case MPA_REQ_WAIT: stop_ep_timer(ep); break; case MPA_REQ_SENT: stop_ep_timer(ep); if (mpa_rev == 2 && ep->tried_with_mpa_v1) connect_reply_upcall(ep, -ECONNRESET); else { /* * we just don't send notification upwards because we * want to retry with mpa_v1 without upper layers even * knowing it. * * do some housekeeping so as to re-initiate the * connection */ PDBG("%s: mpa_rev=%d. 
Retrying with mpav1\n", __func__, mpa_rev); ep->retry_with_mpa_v1 = 1; } break; case MPA_REP_SENT: break; case MPA_REQ_RCVD: break; case MORIBUND: case CLOSING: stop_ep_timer(ep); /*FALLTHROUGH*/ case FPDU_MODE: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret) printk(KERN_ERR MOD "%s - qp <- error failed!\n", __func__); } peer_abort_upcall(ep); break; case ABORTING: break; case DEAD: PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); mutex_unlock(&ep->com.mutex); return 0; default: BUG_ON(1); break; } dst_confirm(ep->dst); if (ep->com.state != ABORTING) { __state_set(&ep->com, DEAD); /* we don't release if we want to retry with mpa_v1 */ if (!ep->retry_with_mpa_v1) release = 1; } mutex_unlock(&ep->com.mutex); rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); if (!rpl_skb) { printk(KERN_ERR MOD "%s - cannot allocate skb!\n", __func__); release = 1; goto out; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); INIT_TP_WR(rpl, ep->hwtid); OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); rpl->cmd = CPL_ABORT_NO_RST; c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); out: if (release) release_ep_resources(ep); /* retry with mpa-v1 */ if (ep && ep->retry_with_mpa_v1) { cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_reconnect(ep); } return 0; } static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; struct cpl_close_con_rpl *rpl = cplhdr(skb); int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(rpl); ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); BUG_ON(!ep); /* The cm_id may be null if we failed to connect */ mutex_lock(&ep->com.mutex); switch (ep->com.state) { case CLOSING: 
__state_set(&ep->com, MORIBUND); break; case MORIBUND: stop_ep_timer(ep); if ((ep->com.cm_id) && (ep->com.qp)) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep); __state_set(&ep->com, DEAD); release = 1; break; case ABORTING: case DEAD: break; default: BUG_ON(1); break; } mutex_unlock(&ep->com.mutex); if (release) release_ep_resources(ep); return 0; } static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_rdma_terminate *rpl = cplhdr(skb); struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(rpl); struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; ep = lookup_tid(t, tid); BUG_ON(!ep); if (ep && ep->com.qp) { printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } else printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); return 0; } /* * Upcall from the adapter indicating data has been transmitted. * For us its just the single MPA request or reply. We can now free * the skb holding the mpa message. */ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_fw4_ack *hdr = cplhdr(skb); u8 credits = hdr->credits; unsigned int tid = GET_TID(hdr); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); if (credits == 0) { PDBG("%s 0 credit ack ep %p tid %u state %u\n", __func__, ep, ep->hwtid, state_read(&ep->com)); return 0; } dst_confirm(ep->dst); if (ep->mpa_skb) { PDBG("%s last streaming msg ack ep %p tid %u state %u " "initiator %u freeing skb\n", __func__, ep, ep->hwtid, state_read(&ep->com), ep->mpa_attr.initiator ? 
1 : 0); kfree_skb(ep->mpa_skb); ep->mpa_skb = NULL; } return 0; } int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { int err; struct c4iw_ep *ep = to_ep(cm_id); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); if (state_read(&ep->com) == DEAD) { c4iw_put_ep(&ep->com); return -ECONNRESET; } BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); if (mpa_rev == 0) abort_connection(ep, NULL, GFP_KERNEL); else { err = send_mpa_reject(ep, pdata, pdata_len); err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); } c4iw_put_ep(&ep->com); return 0; } int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { int err; struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; struct c4iw_ep *ep = to_ep(cm_id); struct c4iw_dev *h = to_c4iw_dev(cm_id->device); struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); if (state_read(&ep->com) == DEAD) { err = -ECONNRESET; goto err; } BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); BUG_ON(!qp); if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { abort_connection(ep, NULL, GFP_KERNEL); err = -EINVAL; goto err; } if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { if (conn_param->ord > ep->ird) { ep->ird = conn_param->ird; ep->ord = conn_param->ord; send_mpa_reject(ep, conn_param->private_data, conn_param->private_data_len); abort_connection(ep, NULL, GFP_KERNEL); err = -ENOMEM; goto err; } if (conn_param->ird > ep->ord) { if (!ep->ord) conn_param->ird = 1; else { abort_connection(ep, NULL, GFP_KERNEL); err = -ENOMEM; goto err; } } } ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (ep->mpa_attr.version != 2) if (peer2peer && ep->ird == 0) ep->ird = 1; PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); cm_id->add_ref(cm_id); ep->com.cm_id = cm_id; ep->com.qp = qp; /* bind QP to EP and move to RTS */ attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = 
ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; /* bind QP and TID with INIT_WR */ mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) goto err1; err = send_mpa_reply(ep, conn_param->private_data, conn_param->private_data_len); if (err) goto err1; state_set(&ep->com, FPDU_MODE); established_upcall(ep); c4iw_put_ep(&ep->com); return 0; err1: ep->com.cm_id = NULL; ep->com.qp = NULL; cm_id->rem_ref(cm_id); err: c4iw_put_ep(&ep->com); return err; } int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_ep *ep; struct rtable *rt; int err = 0; if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { err = -EINVAL; goto out; } ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); err = -ENOMEM; goto out; } init_timer(&ep->timer); ep->plen = conn_param->private_data_len; if (ep->plen) memcpy(ep->mpa_pkt + sizeof(struct mpa_message), conn_param->private_data, ep->plen); ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (peer2peer && ep->ord == 0) ep->ord = 1; cm_id->add_ref(cm_id); ep->com.dev = dev; ep->com.cm_id = cm_id; ep->com.qp = get_qhp(dev, conn_param->qpn); BUG_ON(!ep->com.qp); PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, ep->com.qp, cm_id); /* * Allocate an active TID to initiate a TCP connection. 
*/ ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); if (ep->atid == -1) { printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); err = -ENOMEM; goto fail2; } PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, ntohl(cm_id->local_addr.sin_addr.s_addr), ntohs(cm_id->local_addr.sin_port), ntohl(cm_id->remote_addr.sin_addr.s_addr), ntohs(cm_id->remote_addr.sin_port)); /* find a route */ rt = find_route(dev, cm_id->local_addr.sin_addr.s_addr, cm_id->remote_addr.sin_addr.s_addr, cm_id->local_addr.sin_port, cm_id->remote_addr.sin_port, 0); if (!rt) { printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); err = -EHOSTUNREACH; goto fail3; } ep->dst = &rt->dst; err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, ep->dst, ep->com.dev, true); if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); goto fail4; } PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->l2t->idx); state_set(&ep->com, CONNECTING); ep->tos = 0; ep->com.local_addr = cm_id->local_addr; ep->com.remote_addr = cm_id->remote_addr; /* send connect request to rnic */ err = send_connect(ep); if (!err) goto out; cxgb4_l2t_release(ep->l2t); fail4: dst_release(ep->dst); fail3: cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); fail2: cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); out: return err; } int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) { int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_listen_ep *ep; might_sleep(); ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); err = -ENOMEM; goto fail1; } PDBG("%s ep %p\n", __func__, ep); cm_id->add_ref(cm_id); ep->com.cm_id = cm_id; ep->com.dev = dev; ep->backlog = backlog; ep->com.local_addr = cm_id->local_addr; /* * Allocate a server TID. 
*/ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); if (ep->stid == -1) { printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); err = -ENOMEM; goto fail2; } state_set(&ep->com, LISTEN); c4iw_init_wr_wait(&ep->com.wr_wait); err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, ep->com.local_addr.sin_addr.s_addr, ep->com.local_addr.sin_port, ep->com.dev->rdev.lldi.rxq_ids[0]); if (err) goto fail3; /* wait for pass_open_rpl */ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, __func__); if (!err) { cm_id->provider_data = ep; goto out; } fail3: cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); fail2: cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); fail1: out: return err; } int c4iw_destroy_listen(struct iw_cm_id *cm_id) { int err; struct c4iw_listen_ep *ep = to_listen_ep(cm_id); PDBG("%s ep %p\n", __func__, ep); might_sleep(); state_set(&ep->com, DEAD); c4iw_init_wr_wait(&ep->com.wr_wait); err = listen_stop(ep); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, __func__); cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); done: cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); return err; } int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) { int ret = 0; int close = 0; int fatal = 0; struct c4iw_rdev *rdev; mutex_lock(&ep->com.mutex); PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, states[ep->com.state], abrupt); rdev = &ep->com.dev->rdev; if (c4iw_fatal_error(rdev)) { fatal = 1; close_complete_upcall(ep); ep->com.state = DEAD; } switch (ep->com.state) { case MPA_REQ_WAIT: case MPA_REQ_SENT: case MPA_REQ_RCVD: case MPA_REP_SENT: case FPDU_MODE: close = 1; if (abrupt) ep->com.state = ABORTING; else { ep->com.state = CLOSING; start_ep_timer(ep); } set_bit(CLOSE_SENT, &ep->com.flags); break; case CLOSING: if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { close = 1; if (abrupt) { stop_ep_timer(ep); ep->com.state = ABORTING; } else 
ep->com.state = MORIBUND; } break; case MORIBUND: case ABORTING: case DEAD: PDBG("%s ignoring disconnect ep %p state %u\n", __func__, ep, ep->com.state); break; default: BUG(); break; } if (close) { if (abrupt) { close_complete_upcall(ep); ret = send_abort(ep, NULL, gfp); } else ret = send_halfclose(ep, gfp); if (ret) fatal = 1; } mutex_unlock(&ep->com.mutex); if (fatal) release_ep_resources(ep); return ret; } static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_fw6_msg *rpl = cplhdr(skb); c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); return 0; } /* * These are the real handlers that are called from a * work queue. */ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = act_establish, [CPL_ACT_OPEN_RPL] = act_open_rpl, [CPL_RX_DATA] = rx_data, [CPL_ABORT_RPL_RSS] = abort_rpl, [CPL_ABORT_RPL] = abort_rpl, [CPL_PASS_OPEN_RPL] = pass_open_rpl, [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, [CPL_PASS_ACCEPT_REQ] = pass_accept_req, [CPL_PASS_ESTABLISH] = pass_establish, [CPL_PEER_CLOSE] = peer_close, [CPL_ABORT_REQ_RSS] = peer_abort, [CPL_CLOSE_CON_RPL] = close_con_rpl, [CPL_RDMA_TERMINATE] = terminate, [CPL_FW4_ACK] = fw4_ack, [CPL_FW6_MSG] = async_event }; static void process_timeout(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs; int abort = 1; mutex_lock(&ep->com.mutex); PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, ep->com.state); switch (ep->com.state) { case MPA_REQ_SENT: __state_set(&ep->com, ABORTING); connect_reply_upcall(ep, -ETIMEDOUT); break; case MPA_REQ_WAIT: __state_set(&ep->com, ABORTING); break; case CLOSING: case MORIBUND: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } __state_set(&ep->com, ABORTING); break; default: printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); WARN_ON(1); abort = 0; } 
mutex_unlock(&ep->com.mutex); if (abort) abort_connection(ep, NULL, GFP_KERNEL); c4iw_put_ep(&ep->com); } static void process_timedout_eps(void) { struct c4iw_ep *ep; spin_lock_irq(&timeout_lock); while (!list_empty(&timeout_list)) { struct list_head *tmp; tmp = timeout_list.next; list_del(tmp); spin_unlock_irq(&timeout_lock); ep = list_entry(tmp, struct c4iw_ep, entry); process_timeout(ep); spin_lock_irq(&timeout_lock); } spin_unlock_irq(&timeout_lock); } static void process_work(struct work_struct *work) { struct sk_buff *skb = NULL; struct c4iw_dev *dev; struct cpl_act_establish *rpl; unsigned int opcode; int ret; while ((skb = skb_dequeue(&rxq))) { rpl = cplhdr(skb); dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); opcode = rpl->ot.opcode; BUG_ON(!work_handlers[opcode]); ret = work_handlers[opcode](dev, skb); if (!ret) kfree_skb(skb); } process_timedout_eps(); } static DECLARE_WORK(skb_work, process_work); static void ep_timeout(unsigned long arg) { struct c4iw_ep *ep = (struct c4iw_ep *)arg; spin_lock(&timeout_lock); list_add_tail(&ep->entry, &timeout_list); spin_unlock(&timeout_lock); queue_work(workq, &skb_work); } /* * All the CM events are handled on a work queue to have a safe context. */ static int sched(struct c4iw_dev *dev, struct sk_buff *skb) { /* * Save dev in the skb->cb area. */ *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; /* * Queue the skb and schedule the worker thread. 
*/ skb_queue_tail(&rxq, skb); queue_work(workq, &skb_work); return 0; } static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_set_tcb_rpl *rpl = cplhdr(skb); if (rpl->status != CPL_ERR_NONE) { printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " "for tid %u\n", rpl->status, GET_TID(rpl)); } kfree_skb(skb); return 0; } static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_fw6_msg *rpl = cplhdr(skb); struct c4iw_wr_wait *wr_waitp; int ret; PDBG("%s type %u\n", __func__, rpl->type); switch (rpl->type) { case 1: ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); if (wr_waitp) c4iw_wake_up(wr_waitp, ret ? -ret : 0); kfree_skb(skb); break; case 2: sched(dev, skb); break; default: printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, rpl->type); kfree_skb(skb); break; } return 0; } static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss *req = cplhdr(skb); struct c4iw_ep *ep; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); if (!ep) { printk(KERN_WARNING MOD "Abort on non-existent endpoint, tid %d\n", tid); kfree_skb(skb); return 0; } if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); kfree_skb(skb); return 0; } PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); /* * Wake up any threads in rdma_init() or rdma_fini(). */ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); sched(dev, skb); return 0; } /* * Most upcalls from the T4 Core go to sched() to * schedule the processing on a work queue. 
*/ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = sched, [CPL_ACT_OPEN_RPL] = sched, [CPL_RX_DATA] = sched, [CPL_ABORT_RPL_RSS] = sched, [CPL_ABORT_RPL] = sched, [CPL_PASS_OPEN_RPL] = sched, [CPL_CLOSE_LISTSRV_RPL] = sched, [CPL_PASS_ACCEPT_REQ] = sched, [CPL_PASS_ESTABLISH] = sched, [CPL_PEER_CLOSE] = sched, [CPL_CLOSE_CON_RPL] = sched, [CPL_ABORT_REQ_RSS] = peer_abort_intr, [CPL_RDMA_TERMINATE] = sched, [CPL_FW4_ACK] = sched, [CPL_SET_TCB_RPL] = set_tcb_rpl, [CPL_FW6_MSG] = fw6_msg }; int __init c4iw_cm_init(void) { spin_lock_init(&timeout_lock); skb_queue_head_init(&rxq); workq = create_singlethread_workqueue("iw_cxgb4"); if (!workq) return -ENOMEM; return 0; } void __exit c4iw_cm_term(void) { WARN_ON(!list_empty(&timeout_list)); flush_workqueue(workq); destroy_workqueue(workq); }
gpl-2.0
JohnnySun/RaspberryPi_Lnux_Kernel_JohnnySun_Multiboot
drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
2296
9120
/* * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de> * Copyright (C) 2005-2009 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/export.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/io.h> #include "imx-ipu-v3.h" #include "ipu-prv.h" #define DMFC_RD_CHAN 0x0000 #define DMFC_WR_CHAN 0x0004 #define DMFC_WR_CHAN_DEF 0x0008 #define DMFC_DP_CHAN 0x000c #define DMFC_DP_CHAN_DEF 0x0010 #define DMFC_GENERAL1 0x0014 #define DMFC_GENERAL2 0x0018 #define DMFC_IC_CTRL 0x001c #define DMFC_STAT 0x0020 #define DMFC_WR_CHAN_1_28 0 #define DMFC_WR_CHAN_2_41 8 #define DMFC_WR_CHAN_1C_42 16 #define DMFC_WR_CHAN_2C_43 24 #define DMFC_DP_CHAN_5B_23 0 #define DMFC_DP_CHAN_5F_27 8 #define DMFC_DP_CHAN_6B_24 16 #define DMFC_DP_CHAN_6F_29 24 #define DMFC_FIFO_SIZE_64 (3 << 3) #define DMFC_FIFO_SIZE_128 (2 << 3) #define DMFC_FIFO_SIZE_256 (1 << 3) #define DMFC_FIFO_SIZE_512 (0 << 3) #define DMFC_SEGMENT(x) ((x & 0x7) << 0) #define DMFC_BURSTSIZE_128 (0 << 6) #define DMFC_BURSTSIZE_64 (1 << 6) #define DMFC_BURSTSIZE_32 (2 << 6) #define DMFC_BURSTSIZE_16 (3 << 6) struct dmfc_channel_data { int ipu_channel; unsigned long channel_reg; unsigned long shift; unsigned eot_shift; unsigned max_fifo_lines; }; static const struct dmfc_channel_data dmfcdata[] = { { .ipu_channel = 23, .channel_reg = DMFC_DP_CHAN, .shift = DMFC_DP_CHAN_5B_23, .eot_shift = 20, .max_fifo_lines = 3, }, { .ipu_channel = 24, .channel_reg = DMFC_DP_CHAN, .shift = DMFC_DP_CHAN_6B_24, .eot_shift = 22, .max_fifo_lines 
= 1, }, { .ipu_channel = 27, .channel_reg = DMFC_DP_CHAN, .shift = DMFC_DP_CHAN_5F_27, .eot_shift = 21, .max_fifo_lines = 2, }, { .ipu_channel = 28, .channel_reg = DMFC_WR_CHAN, .shift = DMFC_WR_CHAN_1_28, .eot_shift = 16, .max_fifo_lines = 2, }, { .ipu_channel = 29, .channel_reg = DMFC_DP_CHAN, .shift = DMFC_DP_CHAN_6F_29, .eot_shift = 23, .max_fifo_lines = 1, }, }; #define DMFC_NUM_CHANNELS ARRAY_SIZE(dmfcdata) struct ipu_dmfc_priv; struct dmfc_channel { unsigned slots; unsigned slotmask; unsigned segment; int burstsize; struct ipu_soc *ipu; struct ipu_dmfc_priv *priv; const struct dmfc_channel_data *data; }; struct ipu_dmfc_priv { struct ipu_soc *ipu; struct device *dev; struct dmfc_channel channels[DMFC_NUM_CHANNELS]; struct mutex mutex; unsigned long bandwidth_per_slot; void __iomem *base; int use_count; }; int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc) { struct ipu_dmfc_priv *priv = dmfc->priv; mutex_lock(&priv->mutex); if (!priv->use_count) ipu_module_enable(priv->ipu, IPU_CONF_DMFC_EN); priv->use_count++; mutex_unlock(&priv->mutex); return 0; } EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel); void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc) { struct ipu_dmfc_priv *priv = dmfc->priv; mutex_lock(&priv->mutex); priv->use_count--; if (!priv->use_count) ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN); if (priv->use_count < 0) priv->use_count = 0; mutex_unlock(&priv->mutex); } EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel); static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots, int segment, int burstsize) { struct ipu_dmfc_priv *priv = dmfc->priv; u32 val, field; dev_dbg(priv->dev, "dmfc: using %d slots starting from segment %d for IPU channel %d\n", slots, segment, dmfc->data->ipu_channel); if (!dmfc) return -EINVAL; switch (slots) { case 1: field = DMFC_FIFO_SIZE_64; break; case 2: field = DMFC_FIFO_SIZE_128; break; case 4: field = DMFC_FIFO_SIZE_256; break; case 8: field = DMFC_FIFO_SIZE_512; break; default: return -EINVAL; } switch 
(burstsize) { case 16: field |= DMFC_BURSTSIZE_16; break; case 32: field |= DMFC_BURSTSIZE_32; break; case 64: field |= DMFC_BURSTSIZE_64; break; case 128: field |= DMFC_BURSTSIZE_128; break; } field |= DMFC_SEGMENT(segment); val = readl(priv->base + dmfc->data->channel_reg); val &= ~(0xff << dmfc->data->shift); val |= field << dmfc->data->shift; writel(val, priv->base + dmfc->data->channel_reg); dmfc->slots = slots; dmfc->segment = segment; dmfc->burstsize = burstsize; dmfc->slotmask = ((1 << slots) - 1) << segment; return 0; } static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv, unsigned long bandwidth) { int slots = 1; while (slots * priv->bandwidth_per_slot < bandwidth) slots *= 2; return slots; } static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots) { unsigned slotmask_need, slotmask_used = 0; int i, segment = 0; slotmask_need = (1 << slots) - 1; for (i = 0; i < DMFC_NUM_CHANNELS; i++) slotmask_used |= priv->channels[i].slotmask; while (slotmask_need <= 0xff) { if (!(slotmask_used & slotmask_need)) return segment; slotmask_need <<= 1; segment++; } return -EBUSY; } void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc) { struct ipu_dmfc_priv *priv = dmfc->priv; int i; dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n", dmfc->slots, dmfc->segment); mutex_lock(&priv->mutex); if (!dmfc->slots) goto out; dmfc->slotmask = 0; dmfc->slots = 0; dmfc->segment = 0; for (i = 0; i < DMFC_NUM_CHANNELS; i++) priv->channels[i].slotmask = 0; for (i = 0; i < DMFC_NUM_CHANNELS; i++) { if (priv->channels[i].slots > 0) { priv->channels[i].segment = dmfc_find_slots(priv, priv->channels[i].slots); priv->channels[i].slotmask = ((1 << priv->channels[i].slots) - 1) << priv->channels[i].segment; } } for (i = 0; i < DMFC_NUM_CHANNELS; i++) { if (priv->channels[i].slots > 0) ipu_dmfc_setup_channel(&priv->channels[i], priv->channels[i].slots, priv->channels[i].segment, priv->channels[i].burstsize); } out: mutex_unlock(&priv->mutex); } 
EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);

/*
 * ipu_dmfc_alloc_bandwidth - reserve FIFO slots for a pixel bandwidth.
 *
 * Converts the requested bandwidth to a power-of-two slot count, frees
 * any previous allocation held by this channel, then claims a free
 * contiguous run of slots and programs the channel.
 *
 * Note: ipu_dmfc_free_bandwidth() is deliberately called before taking
 * priv->mutex, because it takes the same mutex itself.
 *
 * Returns 0 on success, -EBUSY if more than 8 slots would be needed or
 * no contiguous run is free.
 */
int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
		unsigned long bandwidth_pixel_per_second, int burstsize)
{
	struct ipu_dmfc_priv *priv = dmfc->priv;
	int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
	int segment = 0, ret = 0;

	dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
			bandwidth_pixel_per_second / 1000000,
			dmfc->data->ipu_channel);

	/* Drop any existing allocation first (takes priv->mutex itself). */
	ipu_dmfc_free_bandwidth(dmfc);

	mutex_lock(&priv->mutex);

	/* Only 8 slots exist in total. */
	if (slots > 8) {
		ret = -EBUSY;
		goto out;
	}

	segment = dmfc_find_slots(priv, slots);
	if (segment < 0) {
		ret = -EBUSY;
		goto out;
	}

	ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);

out:
	mutex_unlock(&priv->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);

/*
 * ipu_dmfc_init_channel - set the end-of-transfer (EOT) mode for a mode
 * line width.
 *
 * Sets the channel's EOT bit in DMFC_GENERAL1 when the allocated FIFO
 * (slots * 64 words * 4 pixels) spans more lines than the hardware
 * limit for this channel.
 *
 * NOTE(review): divides by 'width' with no zero guard — presumably
 * callers always pass a nonzero display width; confirm at call sites.
 *
 * Always returns 0.
 */
int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
{
	struct ipu_dmfc_priv *priv = dmfc->priv;
	u32 dmfc_gen1;

	dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);

	if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
		dmfc_gen1 |= 1 << dmfc->data->eot_shift;
	else
		dmfc_gen1 &= ~(1 << dmfc->data->eot_shift);

	writel(dmfc_gen1, priv->base + DMFC_GENERAL1);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);

/*
 * ipu_dmfc_get - look up the DMFC channel for an IPU channel number.
 *
 * Returns the matching channel, or ERR_PTR(-ENODEV) if the IPU channel
 * has no DMFC mapping.
 */
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
{
	struct ipu_dmfc_priv *priv = ipu->dmfc_priv;
	int i;

	for (i = 0; i < DMFC_NUM_CHANNELS; i++)
		if (dmfcdata[i].ipu_channel == ipu_channel)
			return &priv->channels[i];
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_get);

/*
 * ipu_dmfc_put - release a channel obtained with ipu_dmfc_get.
 *
 * Only frees the channel's slot allocation; the channel struct itself
 * is static per-IPU state.
 */
void ipu_dmfc_put(struct dmfc_channel *dmfc)
{
	ipu_dmfc_free_bandwidth(dmfc);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_put);

/*
 * ipu_dmfc_init - probe-time setup of the DMFC block.
 *
 * Allocates (devm-managed) private state, maps the register window,
 * wires up the per-channel descriptors and programs the default
 * channel/watermark configuration.
 *
 * Returns 0 on success, -ENOMEM on allocation or ioremap failure.
 */
int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
		struct clk *ipu_clk)
{
	struct ipu_dmfc_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_ioremap(dev, base, PAGE_SIZE);
	if (!priv->base)
		return -ENOMEM;

	priv->dev = dev;
	priv->ipu = ipu;
	mutex_init(&priv->mutex);

	ipu->dmfc_priv = priv;

	for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
		priv->channels[i].priv = priv;
		priv->channels[i].ipu = ipu;
		priv->channels[i].data = &dmfcdata[i];
	}

	/* Start with no slots assigned to any channel. */
	writel(0x0, priv->base + DMFC_WR_CHAN);
	writel(0x0, priv->base + DMFC_DP_CHAN);

	/*
	 * We have a total bandwidth of clkrate * 4pixel divided
	 * into 8 slots.
	 */
	priv->bandwidth_per_slot = clk_get_rate(ipu_clk) / 8;

	dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
			priv->bandwidth_per_slot / 1000000);

	/* Default watermark/burst configuration for the WR and DP paths. */
	writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
	writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
	writel(0x00000003, priv->base + DMFC_GENERAL1);

	return 0;
}

/* Nothing to tear down: all resources are devm-managed. */
void ipu_dmfc_exit(struct ipu_soc *ipu)
{
}
gpl-2.0
m0zes/linux
drivers/oprofile/oprofile_perf.c
2296
7225
/* * Copyright 2010 ARM Ltd. * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter * * Perf-events backend for OProfile. */ #include <linux/perf_event.h> #include <linux/platform_device.h> #include <linux/oprofile.h> #include <linux/slab.h> /* * Per performance monitor configuration as set via oprofilefs. */ struct op_counter_config { unsigned long count; unsigned long enabled; unsigned long event; unsigned long unit_mask; unsigned long kernel; unsigned long user; struct perf_event_attr attr; }; static int oprofile_perf_enabled; static DEFINE_MUTEX(oprofile_perf_mutex); static struct op_counter_config *counter_config; static DEFINE_PER_CPU(struct perf_event **, perf_events); static int num_counters; /* * Overflow callback for oprofile. */ static void op_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { int id; u32 cpu = smp_processor_id(); for (id = 0; id < num_counters; ++id) if (per_cpu(perf_events, cpu)[id] == event) break; if (id != num_counters) oprofile_add_sample(regs, id); else pr_warning("oprofile: ignoring spurious overflow " "on cpu %u\n", cpu); } /* * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile * settings in counter_config. Attributes are created as `pinned' events and * so are permanently scheduled on the PMU. 
*/ static void op_perf_setup(void) { int i; u32 size = sizeof(struct perf_event_attr); struct perf_event_attr *attr; for (i = 0; i < num_counters; ++i) { attr = &counter_config[i].attr; memset(attr, 0, size); attr->type = PERF_TYPE_RAW; attr->size = size; attr->config = counter_config[i].event; attr->sample_period = counter_config[i].count; attr->pinned = 1; } } static int op_create_counter(int cpu, int event) { struct perf_event *pevent; if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) return 0; pevent = perf_event_create_kernel_counter(&counter_config[event].attr, cpu, NULL, op_overflow_handler, NULL); if (IS_ERR(pevent)) return PTR_ERR(pevent); if (pevent->state != PERF_EVENT_STATE_ACTIVE) { perf_event_release_kernel(pevent); pr_warning("oprofile: failed to enable event %d " "on CPU %d\n", event, cpu); return -EBUSY; } per_cpu(perf_events, cpu)[event] = pevent; return 0; } static void op_destroy_counter(int cpu, int event) { struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; if (pevent) { perf_event_release_kernel(pevent); per_cpu(perf_events, cpu)[event] = NULL; } } /* * Called by oprofile_perf_start to create active perf events based on the * perviously configured attributes. */ static int op_perf_start(void) { int cpu, event, ret = 0; for_each_online_cpu(cpu) { for (event = 0; event < num_counters; ++event) { ret = op_create_counter(cpu, event); if (ret) return ret; } } return ret; } /* * Called by oprofile_perf_stop at the end of a profiling run. 
*/ static void op_perf_stop(void) { int cpu, event; for_each_online_cpu(cpu) for (event = 0; event < num_counters; ++event) op_destroy_counter(cpu, event); } static int oprofile_perf_create_files(struct dentry *root) { unsigned int i; for (i = 0; i < num_counters; i++) { struct dentry *dir; char buf[4]; snprintf(buf, sizeof buf, "%d", i); dir = oprofilefs_mkdir(root, buf); oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled); oprofilefs_create_ulong(dir, "event", &counter_config[i].event); oprofilefs_create_ulong(dir, "count", &counter_config[i].count); oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask); oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel); oprofilefs_create_ulong(dir, "user", &counter_config[i].user); } return 0; } static int oprofile_perf_setup(void) { raw_spin_lock(&oprofilefs_lock); op_perf_setup(); raw_spin_unlock(&oprofilefs_lock); return 0; } static int oprofile_perf_start(void) { int ret = -EBUSY; mutex_lock(&oprofile_perf_mutex); if (!oprofile_perf_enabled) { ret = 0; op_perf_start(); oprofile_perf_enabled = 1; } mutex_unlock(&oprofile_perf_mutex); return ret; } static void oprofile_perf_stop(void) { mutex_lock(&oprofile_perf_mutex); if (oprofile_perf_enabled) op_perf_stop(); oprofile_perf_enabled = 0; mutex_unlock(&oprofile_perf_mutex); } #ifdef CONFIG_PM static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state) { mutex_lock(&oprofile_perf_mutex); if (oprofile_perf_enabled) op_perf_stop(); mutex_unlock(&oprofile_perf_mutex); return 0; } static int oprofile_perf_resume(struct platform_device *dev) { mutex_lock(&oprofile_perf_mutex); if (oprofile_perf_enabled && op_perf_start()) oprofile_perf_enabled = 0; mutex_unlock(&oprofile_perf_mutex); return 0; } static struct platform_driver oprofile_driver = { .driver = { .name = "oprofile-perf", }, .resume = oprofile_perf_resume, .suspend = oprofile_perf_suspend, }; static struct platform_device *oprofile_pdev; static int 
__init init_driverfs(void) { int ret; ret = platform_driver_register(&oprofile_driver); if (ret) return ret; oprofile_pdev = platform_device_register_simple( oprofile_driver.driver.name, 0, NULL, 0); if (IS_ERR(oprofile_pdev)) { ret = PTR_ERR(oprofile_pdev); platform_driver_unregister(&oprofile_driver); } return ret; } static void exit_driverfs(void) { platform_device_unregister(oprofile_pdev); platform_driver_unregister(&oprofile_driver); } #else static inline int init_driverfs(void) { return 0; } static inline void exit_driverfs(void) { } #endif /* CONFIG_PM */ void oprofile_perf_exit(void) { int cpu, id; struct perf_event *event; for_each_possible_cpu(cpu) { for (id = 0; id < num_counters; ++id) { event = per_cpu(perf_events, cpu)[id]; if (event) perf_event_release_kernel(event); } kfree(per_cpu(perf_events, cpu)); } kfree(counter_config); exit_driverfs(); } int __init oprofile_perf_init(struct oprofile_operations *ops) { int cpu, ret = 0; ret = init_driverfs(); if (ret) return ret; num_counters = perf_num_counters(); if (num_counters <= 0) { pr_info("oprofile: no performance counters\n"); ret = -ENODEV; goto out; } counter_config = kcalloc(num_counters, sizeof(struct op_counter_config), GFP_KERNEL); if (!counter_config) { pr_info("oprofile: failed to allocate %d " "counters\n", num_counters); ret = -ENOMEM; num_counters = 0; goto out; } for_each_possible_cpu(cpu) { per_cpu(perf_events, cpu) = kcalloc(num_counters, sizeof(struct perf_event *), GFP_KERNEL); if (!per_cpu(perf_events, cpu)) { pr_info("oprofile: failed to allocate %d perf events " "for cpu %d\n", num_counters, cpu); ret = -ENOMEM; goto out; } } ops->create_files = oprofile_perf_create_files; ops->setup = oprofile_perf_setup; ops->start = oprofile_perf_start; ops->stop = oprofile_perf_stop; ops->shutdown = oprofile_perf_stop; ops->cpu_type = op_name_from_perf_id(); if (!ops->cpu_type) ret = -ENODEV; else pr_info("oprofile: using %s\n", ops->cpu_type); out: if (ret) oprofile_perf_exit(); return ret; }
gpl-2.0
Galaxy-J5/android_kernel_samsung_j5nlte
arch/frv/kernel/signal.c
2552
11580
/* signal.c: FRV specific bits of signal handling * * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from arch/m68k/kernel/signal.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/personality.h> #include <linux/tracehook.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #define DEBUG_SIG 0 struct fdpic_func_descriptor { unsigned long text; unsigned long GOT; }; /* * Do a signal return; undo the signal stack. */ struct sigframe { __sigrestore_t pretcode; int sig; struct sigcontext sc; unsigned long extramask[_NSIG_WORDS-1]; uint32_t retcode[2]; }; struct rt_sigframe { __sigrestore_t pretcode; int sig; struct siginfo __user *pinfo; void __user *puc; struct siginfo info; struct ucontext uc; uint32_t retcode[2]; }; static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8) { struct user_context *user = current->thread.user; unsigned long tbr, psr; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; tbr = user->i.tbr; psr = user->i.psr; if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context))) goto badframe; user->i.tbr = tbr; user->i.psr = psr; restore_user_regs(user); user->i.syscallno = -1; /* disable syscall checks */ *_gr8 = user->i.gr[8]; return 0; badframe: return 1; } asmlinkage int sys_sigreturn(void) { struct sigframe __user *frame = (struct sigframe __user *) __frame->sp; sigset_t set; int gr8; if (!access_ok(VERIFY_READ, 
frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.sc_oldmask)) goto badframe; if (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(&frame->sc, &gr8)) goto badframe; return gr8; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int sys_rt_sigreturn(void) { struct rt_sigframe __user *frame = (struct rt_sigframe __user *) __frame->sp; sigset_t set; int gr8; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return gr8; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame */ static int setup_sigcontext(struct sigcontext __user *sc, unsigned long mask) { save_user_regs(current->thread.user); if (copy_to_user(&sc->sc_context, current->thread.user, sizeof(sc->sc_context)) != 0) goto badframe; /* non-iBCS2 extensions.. */ if (__put_user(mask, &sc->sc_oldmask) < 0) goto badframe; return 0; badframe: return 1; } /*****************************************************************************/ /* * Determine which stack to use.. */ static inline void __user *get_sigframe(struct k_sigaction *ka, size_t frame_size) { unsigned long sp; /* Default to using normal stack */ sp = __frame->sp; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (! 
sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; } return (void __user *) ((sp - frame_size) & ~7UL); } /* end get_sigframe() */ /*****************************************************************************/ /* * */ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) { struct sigframe __user *frame; int rsig; set_fs(USER_DS); frame = get_sigframe(ka, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; rsig = sig; if (sig < 32 && __current_thread_info->exec_domain && __current_thread_info->exec_domain->signal_invmap) rsig = __current_thread_info->exec_domain->signal_invmap[sig]; if (__put_user(rsig, &frame->sig) < 0) goto give_sigsegv; if (setup_sigcontext(&frame->sc, set->sig[0])) goto give_sigsegv; if (_NSIG_WORDS > 1) { if (__copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask))) goto give_sigsegv; } /* Set up to return from userspace. If provided, use a stub * already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { if (__put_user(ka->sa.sa_restorer, &frame->pretcode) < 0) goto give_sigsegv; } else { /* Set up the following code on the stack: * setlos #__NR_sigreturn,gr7 * tira gr0,0 */ if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) || __put_user(0x8efc0000|__NR_sigreturn, &frame->retcode[0]) || __put_user(0xc0700000, &frame->retcode[1])) goto give_sigsegv; flush_icache_range((unsigned long) frame->retcode, (unsigned long) (frame->retcode + 2)); } /* Set up registers for the signal handler */ if (current->personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; struct fdpic_func_descriptor desc; if (copy_from_user(&desc, funcptr, sizeof(desc))) goto give_sigsegv; __frame->pc = desc.text; __frame->gr15 = desc.GOT; } else { __frame->pc = (unsigned long) ka->sa.sa_handler; __frame->gr15 = 0; } __frame->sp = (unsigned long) frame; __frame->lr = (unsigned long) 
&frame->retcode; __frame->gr8 = sig; #if DEBUG_SIG printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", sig, current->comm, current->pid, frame, __frame->pc, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* end setup_frame() */ /*****************************************************************************/ /* * */ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set) { struct rt_sigframe __user *frame; int rsig; set_fs(USER_DS); frame = get_sigframe(ka, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; rsig = sig; if (sig < 32 && __current_thread_info->exec_domain && __current_thread_info->exec_domain->signal_invmap) rsig = __current_thread_info->exec_domain->signal_invmap[sig]; if (__put_user(rsig, &frame->sig) || __put_user(&frame->info, &frame->pinfo) || __put_user(&frame->uc, &frame->puc)) goto give_sigsegv; if (copy_siginfo_to_user(&frame->info, info)) goto give_sigsegv; /* Create the ucontext. */ if (__put_user(0, &frame->uc.uc_flags) || __put_user(NULL, &frame->uc.uc_link) || __save_altstack(&frame->uc.uc_stack, __frame->sp)) goto give_sigsegv; if (setup_sigcontext(&frame->uc.uc_mcontext, set->sig[0])) goto give_sigsegv; if (__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub * already in userspace. 
*/ if (ka->sa.sa_flags & SA_RESTORER) { if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) goto give_sigsegv; } else { /* Set up the following code on the stack: * setlos #__NR_sigreturn,gr7 * tira gr0,0 */ if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) || __put_user(0x8efc0000|__NR_rt_sigreturn, &frame->retcode[0]) || __put_user(0xc0700000, &frame->retcode[1])) goto give_sigsegv; flush_icache_range((unsigned long) frame->retcode, (unsigned long) (frame->retcode + 2)); } /* Set up registers for signal handler */ if (current->personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; struct fdpic_func_descriptor desc; if (copy_from_user(&desc, funcptr, sizeof(desc))) goto give_sigsegv; __frame->pc = desc.text; __frame->gr15 = desc.GOT; } else { __frame->pc = (unsigned long) ka->sa.sa_handler; __frame->gr15 = 0; } __frame->sp = (unsigned long) frame; __frame->lr = (unsigned long) &frame->retcode; __frame->gr8 = sig; __frame->gr9 = (unsigned long) &frame->info; #if DEBUG_SIG printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", sig, current->comm, current->pid, frame, __frame->pc, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* end setup_rt_frame() */ /*****************************************************************************/ /* * OK, we're invoking a handler */ static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka) { sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ if (__frame->syscallno != -1) { /* If so, check system call restarting.. 
*/ switch (__frame->gr8) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: __frame->gr8 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { __frame->gr8 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: __frame->gr8 = __frame->orig_gr8; __frame->pc -= 4; } __frame->syscallno = -1; } /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset); else ret = setup_frame(sig, ka, oldset); if (ret) return; signal_delivered(sig, info, ka, __frame, test_thread_flag(TIF_SINGLESTEP)); } /* end handle_signal() */ /*****************************************************************************/ /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ static void do_signal(void) { struct k_sigaction ka; siginfo_t info; int signr; signr = get_signal_to_deliver(&info, &ka, __frame, NULL); if (signr > 0) { handle_signal(signr, &info, &ka); return; } /* Did we come from a system call? */ if (__frame->syscallno != -1) { /* Restart the system call - no handlers present */ switch (__frame->gr8) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: __frame->gr8 = __frame->orig_gr8; __frame->pc -= 4; break; case -ERESTART_RESTARTBLOCK: __frame->gr7 = __NR_restart_syscall; __frame->pc -= 4; break; } __frame->syscallno = -1; } /* if there's no signal to deliver, we just put the saved sigmask * back */ restore_saved_sigmask(); } /* end do_signal() */ /*****************************************************************************/ /* * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ asmlinkage void do_notify_resume(__u32 thread_info_flags) { /* pending single-step? 
*/ if (thread_info_flags & _TIF_SINGLESTEP) clear_thread_flag(TIF_SINGLESTEP); /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(); /* deal with notification on about to resume userspace execution */ if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(__frame); } } /* end do_notify_resume() */
gpl-2.0
Tommy-Geenexus/android_kernel_sony_msm8994_suzuran_6.0.x
drivers/irqchip/irq-metag.c
3064
9589
/* * Meta internal (HWSTATMETA) interrupt code. * * Copyright (C) 2011-2012 Imagination Technologies Ltd. * * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c * The code base could be generalised/merged as a lot of the functionality is * similar. Until this is done, we try to keep the code simple here. */ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irqdomain.h> #include <asm/irq.h> #include <asm/hwthread.h> #define PERF0VECINT 0x04820580 #define PERF1VECINT 0x04820588 #define PERF0TRIG_OFFSET 16 #define PERF1TRIG_OFFSET 17 /** * struct metag_internal_irq_priv - private meta internal interrupt data * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA) * @unmasked: Record of unmasked IRQs */ struct metag_internal_irq_priv { struct irq_domain *domain; unsigned long unmasked; }; /* Private data for the one and only internal interrupt controller */ static struct metag_internal_irq_priv metag_internal_irq_priv; static unsigned int metag_internal_irq_startup(struct irq_data *data); static void metag_internal_irq_shutdown(struct irq_data *data); static void metag_internal_irq_ack(struct irq_data *data); static void metag_internal_irq_mask(struct irq_data *data); static void metag_internal_irq_unmask(struct irq_data *data); #ifdef CONFIG_SMP static int metag_internal_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, bool force); #endif static struct irq_chip internal_irq_edge_chip = { .name = "HWSTATMETA-IRQ", .irq_startup = metag_internal_irq_startup, .irq_shutdown = metag_internal_irq_shutdown, .irq_ack = metag_internal_irq_ack, .irq_mask = metag_internal_irq_mask, .irq_unmask = metag_internal_irq_unmask, #ifdef CONFIG_SMP .irq_set_affinity = metag_internal_irq_set_affinity, #endif }; /* * metag_hwvec_addr - get the address of *VECINT regs of irq * * This function is a table of supported triggers on HWSTATMETA * Could do with a structure, but better keep it simple. 
Changes * in this code should be rare. */ static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw) { void __iomem *addr; switch (hw) { case PERF0TRIG_OFFSET: addr = (void __iomem *)PERF0VECINT; break; case PERF1TRIG_OFFSET: addr = (void __iomem *)PERF1VECINT; break; default: addr = NULL; break; } return addr; } /* * metag_internal_startup - setup an internal irq * @irq: the irq to startup * * Multiplex interrupts for @irq onto TR1. Clear any pending * interrupts. */ static unsigned int metag_internal_irq_startup(struct irq_data *data) { /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ metag_internal_irq_ack(data); /* Enable the interrupt by unmasking it */ metag_internal_irq_unmask(data); return 0; } /* * metag_internal_irq_shutdown - turn off the irq * @irq: the irq number to turn off * * Mask @irq and clear any pending interrupts. * Stop muxing @irq onto TR1. */ static void metag_internal_irq_shutdown(struct irq_data *data) { /* Disable the IRQ at the core by masking it. */ metag_internal_irq_mask(data); /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ metag_internal_irq_ack(data); } /* * metag_internal_irq_ack - acknowledge irq * @irq: the irq to ack */ static void metag_internal_irq_ack(struct irq_data *data) { irq_hw_number_t hw = data->hwirq; unsigned int bit = 1 << hw; if (metag_in32(HWSTATMETA) & bit) metag_out32(bit, HWSTATMETA); } /** * metag_internal_irq_mask() - mask an internal irq by unvectoring * @data: data for the internal irq to mask * * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core * and retriggered if necessary later. 
*/ static void metag_internal_irq_mask(struct irq_data *data) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; irq_hw_number_t hw = data->hwirq; void __iomem *vec_addr = metag_hwvec_addr(hw); clear_bit(hw, &priv->unmasked); /* there is no interrupt mask, so unvector the interrupt */ metag_out32(0, vec_addr); } /** * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring * @data: data for the internal irq to unmask * * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the * core and retriggered if necessary. */ static void metag_internal_irq_unmask(struct irq_data *data) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; irq_hw_number_t hw = data->hwirq; unsigned int bit = 1 << hw; void __iomem *vec_addr = metag_hwvec_addr(hw); unsigned int thread = hard_processor_id(); set_bit(hw, &priv->unmasked); /* there is no interrupt mask, so revector the interrupt */ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr); /* * Re-trigger interrupt * * Writing a 1 toggles, and a 0->1 transition triggers. We only * retrigger if the status bit is already set, which means we * need to clear it first. Retriggering is fundamentally racy * because if the interrupt fires again after we clear it we * could end up clearing it again and the interrupt handler * thinking it hasn't fired. Therefore we need to keep trying to * retrigger until the bit is set. */ if (metag_in32(HWSTATMETA) & bit) { metag_out32(bit, HWSTATMETA); while (!(metag_in32(HWSTATMETA) & bit)) metag_out32(bit, HWSTATMETA); } } #ifdef CONFIG_SMP /* * metag_internal_irq_set_affinity - set the affinity for an interrupt */ static int metag_internal_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, bool force) { unsigned int cpu, thread; irq_hw_number_t hw = data->hwirq; /* * Wire up this interrupt from *VECINT to the Meta core. 
* * Note that we can't wire up *VECINT to interrupt more than * one cpu (the interrupt code doesn't support it), so we just * pick the first cpu we find in 'cpumask'. */ cpu = cpumask_any_and(cpumask, cpu_online_mask); thread = cpu_2_hwthread_id[cpu]; metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), metag_hwvec_addr(hw)); return 0; } #endif /* * metag_internal_irq_demux - irq de-multiplexer * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * The cpu receives an interrupt on TR1 when an interrupt has * occurred. It is this function's job to demux this irq and * figure out exactly which trigger needs servicing. */ static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) { struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); irq_hw_number_t hw; unsigned int irq_no; u32 status; recalculate: status = metag_in32(HWSTATMETA) & priv->unmasked; for (hw = 0; status != 0; status >>= 1, ++hw) { if (status & 0x1) { /* * Map the hardware IRQ number to a virtual Linux IRQ * number. */ irq_no = irq_linear_revmap(priv->domain, hw); /* * Only fire off interrupts that are * registered to be handled by the kernel. * Other interrupts are probably being * handled by other Meta hardware threads. */ generic_handle_irq(irq_no); /* * The handler may have re-enabled interrupts * which could have caused a nested invocation * of this code and make the copy of the * status register we are using invalid. */ goto recalculate; } } } /** * internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number. * @hw: Number of the internal IRQ. Must be in range. * * Returns: The virtual IRQ number of the Meta internal IRQ specified by * @hw. 
*/ int internal_irq_map(unsigned int hw) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; if (!priv->domain) return -ENODEV; return irq_create_mapping(priv->domain, hw); } /** * metag_internal_irq_init_cpu - regsister with the Meta cpu * @cpu: the CPU to register on * * Configure @cpu's TR1 irq so that we can demux irqs. */ static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv, int cpu) { unsigned int thread = cpu_2_hwthread_id[cpu]; unsigned int signum = TBID_SIGNUM_TR1(thread); int irq = tbisig_map(signum); /* Register the multiplexed IRQ handler */ irq_set_handler_data(irq, priv); irq_set_chained_handler(irq, metag_internal_irq_demux); irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); } /** * metag_internal_intc_map() - map an internal irq * @d: irq domain of internal trigger block * @irq: virtual irq number * @hw: hardware irq number within internal trigger block * * This sets up a virtual irq for a specified hardware interrupt. The irq chip * and handler is configured. */ static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { /* only register interrupt if it is mapped */ if (!metag_hwvec_addr(hw)) return -EINVAL; irq_set_chip_and_handler(irq, &internal_irq_edge_chip, handle_edge_irq); return 0; } static const struct irq_domain_ops metag_internal_intc_domain_ops = { .map = metag_internal_intc_map, }; /** * metag_internal_irq_register - register internal IRQs * * Register the irq chip and handler function for all internal IRQs */ int __init init_internal_IRQ(void) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; unsigned int cpu; /* Set up an IRQ domain */ priv->domain = irq_domain_add_linear(NULL, 32, &metag_internal_intc_domain_ops, priv); if (unlikely(!priv->domain)) { pr_err("meta-internal-intc: cannot add IRQ domain\n"); return -ENOMEM; } /* Setup TR1 for all cpus. */ for_each_possible_cpu(cpu) metag_internal_irq_init_cpu(priv, cpu); return 0; };
gpl-2.0
robertoalcantara/linux-sunxi_craff
drivers/net/ethernet/sun/sunqe.c
4856
25881
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. * Once again I am out to prove that every ethernet * controller out there can be most efficiently programmed * if you make it look like a LANCE. * * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> #include <asm/pgtable.h> #include <asm/irq.h> #include "sunqe.h" #define DRV_NAME "sunqe" #define DRV_VERSION "4.1" #define DRV_RELDATE "August 27, 2008" #define DRV_AUTHOR "David S. 
Miller (davem@davemloft.net)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); MODULE_LICENSE("GPL"); static struct sunqec *root_qec_dev; static void qe_set_multicast(struct net_device *dev); #define QEC_RESET_TRIES 200 static inline int qec_global_reset(void __iomem *gregs) { int tries = QEC_RESET_TRIES; sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); while (--tries) { u32 tmp = sbus_readl(gregs + GLOB_CTRL); if (tmp & GLOB_CTRL_RESET) { udelay(20); continue; } break; } if (tries) return 0; printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); return -1; } #define MACE_RESET_RETRIES 200 #define QE_RESET_RETRIES 200 static inline int qe_stop(struct sunqe *qep) { void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; int tries; /* Reset the MACE, then the QEC channel. */ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); tries = MACE_RESET_RETRIES; while (--tries) { u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); if (tmp & MREGS_BCONFIG_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); return -1; } sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); tries = QE_RESET_RETRIES; while (--tries) { u32 tmp = sbus_readl(cregs + CREG_CTRL); if (tmp & CREG_CTRL_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); return -1; } return 0; } static void qe_init_rings(struct sunqe *qep) { struct qe_init_block *qb = qep->qe_block; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; int i; qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; memset(qb, 0, sizeof(struct qe_init_block)); memset(qbufs, 0, sizeof(struct sunqe_buffers)); for (i = 0; i < RX_RING_SIZE; i++) { qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); 
qb->qe_rxd[i].rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); } } static int qe_init(struct sunqe *qep, int from_irq) { struct sunqec *qecp = qep->parent; void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; unsigned char *e = &qep->dev->dev_addr[0]; u32 tmp; int i; /* Shut it up. */ if (qe_stop(qep)) return -EAGAIN; /* Setup initial rx/tx init block pointers. */ sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); /* Enable/mask the various irq's. */ sbus_writel(0, cregs + CREG_RIMASK); sbus_writel(1, cregs + CREG_TIMASK); sbus_writel(0, cregs + CREG_QMASK); sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); /* Setup the FIFO pointers into QEC local memory. */ tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); sbus_writel(tmp, cregs + CREG_RXRBUFPTR); sbus_writel(tmp, cregs + CREG_RXWBUFPTR); tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + sbus_readl(gregs + GLOB_RSIZE); sbus_writel(tmp, cregs + CREG_TXRBUFPTR); sbus_writel(tmp, cregs + CREG_TXWBUFPTR); /* Clear the channel collision counter. */ sbus_writel(0, cregs + CREG_CCNT); /* For 10baseT, inter frame space nor throttle seems to be necessary. */ sbus_writel(0, cregs + CREG_PIPG); /* Now dork with the AMD MACE. */ sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); sbus_writeb(0, mregs + MREGS_RXFCNTL); /* The QEC dma's the rx'd packets from local memory out to main memory, * and therefore it interrupts when the packet reception is "complete". * So don't listen for the MACE talking about it. 
*/ sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), mregs + MREGS_FCONFIG); /* Only usable interface on QuadEther is twisted pair. */ sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); /* Tell MACE we are changing the ether address. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); sbus_writeb(e[0], mregs + MREGS_ETHADDR); sbus_writeb(e[1], mregs + MREGS_ETHADDR); sbus_writeb(e[2], mregs + MREGS_ETHADDR); sbus_writeb(e[3], mregs + MREGS_ETHADDR); sbus_writeb(e[4], mregs + MREGS_ETHADDR); sbus_writeb(e[5], mregs + MREGS_ETHADDR); /* Clear out the address filter. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0, mregs + MREGS_FILTER); /* Address changes are now complete. */ sbus_writeb(0, mregs + MREGS_IACONFIG); qe_init_rings(qep); /* Wait a little bit for the link to come up... */ mdelay(5); if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { int tries = 50; while (--tries) { u8 tmp; mdelay(5); barrier(); tmp = sbus_readb(mregs + MREGS_PHYCONFIG); if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) break; } if (tries == 0) printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); } /* Missed packet counter is cleared on a read. */ sbus_readb(mregs + MREGS_MPCNT); /* Reload multicast information, this will enable the receiver * and transmitter. */ qe_set_multicast(qep->dev); /* QEC should now start to show interrupts. */ return 0; } /* Grrr, certain error conditions completely lock up the AMD MACE, * so when we get these we _must_ reset the chip. 
*/ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) { struct net_device *dev = qep->dev; int mace_hwbug_workaround = 0; if (qe_status & CREG_STAT_EDEFER) { printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); dev->stats.tx_errors++; } if (qe_status & CREG_STAT_CLOSS) { printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_carrier_errors++; } if (qe_status & CREG_STAT_ERETRIES) { printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_LCOLL) { printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); dev->stats.tx_errors++; dev->stats.collisions++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_FUFLOW) { printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_JERROR) { printk(KERN_ERR "%s: Jabber error.\n", dev->name); } if (qe_status & CREG_STAT_BERROR) { printk(KERN_ERR "%s: Babble error.\n", dev->name); } if (qe_status & CREG_STAT_CCOFLOW) { dev->stats.tx_errors += 256; dev->stats.collisions += 256; } if (qe_status & CREG_STAT_TXDERROR) { printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXLERR) { printk(KERN_ERR "%s: Transmit late error.\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXPERR) { printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXSERR) { printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RCCOFLOW) { dev->stats.rx_errors += 256; 
dev->stats.collisions += 256; } if (qe_status & CREG_STAT_RUOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_over_errors += 256; } if (qe_status & CREG_STAT_MCOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_missed_errors += 256; } if (qe_status & CREG_STAT_RXFOFLOW) { printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_over_errors++; } if (qe_status & CREG_STAT_RLCOLL) { printk(KERN_ERR "%s: Late receive collision.\n", dev->name); dev->stats.rx_errors++; dev->stats.collisions++; } if (qe_status & CREG_STAT_FCOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_frame_errors += 256; } if (qe_status & CREG_STAT_CECOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_crc_errors += 256; } if (qe_status & CREG_STAT_RXDROP) { printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_dropped++; dev->stats.rx_missed_errors++; } if (qe_status & CREG_STAT_RXSMALL) { printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_length_errors++; } if (qe_status & CREG_STAT_RXLERR) { printk(KERN_ERR "%s: Receive late error.\n", dev->name); dev->stats.rx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXPERR) { printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXSERR) { printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (mace_hwbug_workaround) qe_init(qep, 1); return mace_hwbug_workaround; } /* Per-QE receive interrupt service routine. Just like on the happy meal * we receive directly into skb's with a small packet copy water mark. 
*/ static void qe_rx(struct sunqe *qep) { struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; int elem = qep->rx_new, drops = 0; u32 flags; this = &rxbase[elem]; while (!((flags = this->rx_flags) & RXD_OWN)) { struct sk_buff *skb; unsigned char *this_qbuf = &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0]; __u32 this_qbuf_dvma = qbufs_dvma + qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1))); struct qe_rxd *end_rxd = &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)]; int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */ /* Check for errors. */ if (len < ETH_ZLEN) { dev->stats.rx_errors++; dev->stats.rx_length_errors++; dev->stats.rx_dropped++; } else { skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { drops++; dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); skb_put(skb, len); skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, len); skb->protocol = eth_type_trans(skb, qep->dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } } end_rxd->rx_addr = this_qbuf_dvma; end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); elem = NEXT_RX(elem); this = &rxbase[elem]; } qep->rx_new = elem; if (drops) printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name); } static void qe_tx_reclaim(struct sunqe *qep); /* Interrupts for all QE's get filtered out via the QEC master controller, * so we just run through each qe and check to see who is signaling * and thus needs to be serviced. */ static irqreturn_t qec_interrupt(int irq, void *dev_id) { struct sunqec *qecp = dev_id; u32 qec_status; int channel = 0; /* Latch the status now. 
*/ qec_status = sbus_readl(qecp->gregs + GLOB_STAT); while (channel < 4) { if (qec_status & 0xf) { struct sunqe *qep = qecp->qes[channel]; u32 qe_status; qe_status = sbus_readl(qep->qcregs + CREG_STAT); if (qe_status & CREG_STAT_ERRORS) { if (qe_is_bolixed(qep, qe_status)) goto next; } if (qe_status & CREG_STAT_RXIRQ) qe_rx(qep); if (netif_queue_stopped(qep->dev) && (qe_status & CREG_STAT_TXIRQ)) { spin_lock(&qep->lock); qe_tx_reclaim(qep); if (TX_BUFFS_AVAIL(qep) > 0) { /* Wake net queue and return to * lazy tx reclaim. */ netif_wake_queue(qep->dev); sbus_writel(1, qep->qcregs + CREG_TIMASK); } spin_unlock(&qep->lock); } next: ; } qec_status >>= 4; channel++; } return IRQ_HANDLED; } static int qe_open(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); qep->mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB | MREGS_MCONFIG_MBAENAB); return qe_init(qep, 0); } static int qe_close(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); qe_stop(qep); return 0; } /* Reclaim TX'd frames from the ring. This must always run under * the IRQ protected qep->lock. */ static void qe_tx_reclaim(struct sunqe *qep) { struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; int elem = qep->tx_old; while (elem != qep->tx_new) { u32 flags = txbase[elem].tx_flags; if (flags & TXD_OWN) break; elem = NEXT_TX(elem); } qep->tx_old = elem; } static void qe_tx_timeout(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); int tx_full; spin_lock_irq(&qep->lock); /* Try to reclaim, if that frees up some tx * entries, we're fine. */ qe_tx_reclaim(qep); tx_full = TX_BUFFS_AVAIL(qep) <= 0; spin_unlock_irq(&qep->lock); if (! tx_full) goto out; printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); qe_init(qep, 1); out: netif_wake_queue(dev); } /* Get a packet queued to go onto the wire. 
*/ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; unsigned char *txbuf; int len, entry; spin_lock_irq(&qep->lock); qe_tx_reclaim(qep); len = skb->len; entry = qep->tx_new; txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0]; txbuf_dvma = qbufs_dvma + qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1))); /* Avoid a race... */ qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; skb_copy_from_linear_data(skb, txbuf, len); qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; qep->qe_block->qe_txd[entry].tx_flags = (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); qep->tx_new = NEXT_TX(entry); /* Get it going. */ sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); dev->stats.tx_packets++; dev->stats.tx_bytes += len; if (TX_BUFFS_AVAIL(qep) <= 0) { /* Halt the net queue and enable tx interrupts. * When the tx queue empties the tx irq handler * will wake up the queue and return us back to * the lazy tx reclaim scheme. */ netif_stop_queue(dev); sbus_writel(0, qep->qcregs + CREG_TIMASK); } spin_unlock_irq(&qep->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct netdev_hw_addr *ha; u8 new_mconfig = qep->mconfig; int i; u32 crc; /* Lock out others. 
*/ netif_stop_queue(dev); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0xff, qep->mregs + MREGS_FILTER); sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } else if (dev->flags & IFF_PROMISC) { new_mconfig |= MREGS_MCONFIG_PROMISC; } else { u16 hash_table[4]; u8 *hbytes = (unsigned char *) &hash_table[0]; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } /* Program the qe with the new filter value. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) { u8 tmp = *hbytes++; sbus_writeb(tmp, qep->mregs + MREGS_FILTER); } sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } /* Any change of the logical address filter, the physical address, * or enabling/disabling promiscuous mode causes the MACE to disable * the receiver. So we must re-enable them here or else the MACE * refuses to listen to anything on the network. Sheesh, took * me a day or two to find this bug. */ qep->mconfig = new_mconfig; sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); /* Let us get going again. */ netif_wake_queue(dev); } /* Ethtool support... 
*/ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { const struct linux_prom_registers *regs; struct sunqe *qep = netdev_priv(dev); struct platform_device *op; strcpy(info->driver, "sunqe"); strcpy(info->version, "3.0"); op = qep->op; regs = of_get_property(op->dev.of_node, "reg", NULL); if (regs) sprintf(info->bus_info, "SBUS:%d", regs->which_io); } static u32 qe_get_link(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); void __iomem *mregs = qep->mregs; u8 phyconfig; spin_lock_irq(&qep->lock); phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); spin_unlock_irq(&qep->lock); return phyconfig & MREGS_PHYCONFIG_LSTAT; } static const struct ethtool_ops qe_ethtool_ops = { .get_drvinfo = qe_get_drvinfo, .get_link = qe_get_link, }; /* This is only called once at boot time for each card probed. */ static void qec_init_once(struct sunqec *qecp, struct platform_device *op) { u8 bsizes = qecp->qec_bursts; if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); } else if (bsizes & DMA_BURST32) { sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); } else { sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); } /* Packetsize only used in 100baseT BigMAC configurations, * set it to zero just to be on the safe side. */ sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); /* Set the local memsize register, divided up to one piece per QE channel. */ sbus_writel((resource_size(&op->resource[1]) >> 2), qecp->gregs + GLOB_MSIZE); /* Divide up the local QEC memory amongst the 4 QE receiver and * transmitter FIFOs. Basically it is (total / 2 / num_channels). 
*/ sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_TSIZE); sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_RSIZE); } static u8 __devinit qec_get_burst(struct device_node *dp) { u8 bsizes, bsizes_more; /* Find and set the burst sizes for the QEC, since it * does the actual dma for all 4 channels. */ bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); bsizes &= 0xff; bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32)==0) bsizes = (DMA_BURST32 - 1); return bsizes; } static struct sunqec * __devinit get_qec(struct platform_device *child) { struct platform_device *op = to_platform_device(child->dev.parent); struct sunqec *qecp; qecp = dev_get_drvdata(&op->dev); if (!qecp) { qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); if (qecp) { u32 ctrl; qecp->op = op; qecp->gregs = of_ioremap(&op->resource[0], 0, GLOB_REG_SIZE, "QEC Global Registers"); if (!qecp->gregs) goto fail; /* Make sure the QEC is in MACE mode. 
*/ ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); ctrl &= 0xf0000000; if (ctrl != GLOB_CTRL_MMODE) { printk(KERN_ERR "qec: Not in MACE mode!\n"); goto fail; } if (qec_global_reset(qecp->gregs)) goto fail; qecp->qec_bursts = qec_get_burst(op->dev.of_node); qec_init_once(qecp, op); if (request_irq(op->archdata.irqs[0], qec_interrupt, IRQF_SHARED, "qec", (void *) qecp)) { printk(KERN_ERR "qec: Can't register irq.\n"); goto fail; } dev_set_drvdata(&op->dev, qecp); qecp->next_module = root_qec_dev; root_qec_dev = qecp; } } return qecp; fail: if (qecp->gregs) of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); kfree(qecp); return NULL; } static const struct net_device_ops qec_ops = { .ndo_open = qe_open, .ndo_stop = qe_close, .ndo_start_xmit = qe_start_xmit, .ndo_set_rx_mode = qe_set_multicast, .ndo_tx_timeout = qe_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit qec_ether_init(struct platform_device *op) { static unsigned version_printed; struct net_device *dev; struct sunqec *qecp; struct sunqe *qe; int i, res; if (version_printed++ == 0) printk(KERN_INFO "%s", version); dev = alloc_etherdev(sizeof(struct sunqe)); if (!dev) return -ENOMEM; memcpy(dev->dev_addr, idprom->id_ethaddr, 6); qe = netdev_priv(dev); res = -ENODEV; i = of_getintprop_default(op->dev.of_node, "channel#", -1); if (i == -1) goto fail; qe->channel = i; spin_lock_init(&qe->lock); qecp = get_qec(op); if (!qecp) goto fail; qecp->qes[qe->channel] = qe; qe->dev = dev; qe->parent = qecp; qe->op = op; res = -ENOMEM; qe->qcregs = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "QEC Channel Registers"); if (!qe->qcregs) { printk(KERN_ERR "qe: Cannot map channel registers.\n"); goto fail; } qe->mregs = of_ioremap(&op->resource[1], 0, MREGS_REG_SIZE, "QE MACE Registers"); if (!qe->mregs) { printk(KERN_ERR "qe: Cannot map MACE registers.\n"); goto fail; } qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE, 
&qe->qblock_dvma, GFP_ATOMIC); qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), &qe->buffers_dvma, GFP_ATOMIC); if (qe->qe_block == NULL || qe->qblock_dvma == 0 || qe->buffers == NULL || qe->buffers_dvma == 0) goto fail; /* Stop this QE. */ qe_stop(qe); SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; dev->irq = op->archdata.irqs[0]; dev->dma = 0; dev->ethtool_ops = &qe_ethtool_ops; dev->netdev_ops = &qec_ops; res = register_netdev(dev); if (res) goto fail; dev_set_drvdata(&op->dev, qe); printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, dev->dev_addr); return 0; fail: if (qe->qcregs) of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); if (qe->mregs) of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); if (qe->qe_block) dma_free_coherent(&op->dev, PAGE_SIZE, qe->qe_block, qe->qblock_dvma); if (qe->buffers) dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qe->buffers, qe->buffers_dvma); free_netdev(dev); return res; } static int __devinit qec_sbus_probe(struct platform_device *op) { return qec_ether_init(op); } static int __devexit qec_sbus_remove(struct platform_device *op) { struct sunqe *qp = dev_get_drvdata(&op->dev); struct net_device *net_dev = qp->dev; unregister_netdev(net_dev); of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, qp->qe_block, qp->qblock_dvma); dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qp->buffers, qp->buffers_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id qec_sbus_match[] = { { .name = "qe", }, {}, }; MODULE_DEVICE_TABLE(of, qec_sbus_match); static struct platform_driver qec_sbus_driver = { .driver = { .name = "qec", .owner = THIS_MODULE, .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, .remove = __devexit_p(qec_sbus_remove), }; static int __init qec_init(void) { return 
platform_driver_register(&qec_sbus_driver); } static void __exit qec_exit(void) { platform_driver_unregister(&qec_sbus_driver); while (root_qec_dev) { struct sunqec *next = root_qec_dev->next_module; struct platform_device *op = root_qec_dev->op; free_irq(op->archdata.irqs[0], (void *) root_qec_dev); of_iounmap(&op->resource[0], root_qec_dev->gregs, GLOB_REG_SIZE); kfree(root_qec_dev); root_qec_dev = next; } } module_init(qec_init); module_exit(qec_exit);
gpl-2.0
Android-Butter/aux_kernel_m7
drivers/net/ethernet/8390/apne.c
4856
17103
/* * Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200 * * (C) Copyright 1997 Alain Malek * (Alain.Malek@cryogen.com) * * ---------------------------------------------------------------------------- * * This program is based on * * ne.c: A general non-shared-memory NS8390 ethernet driver for linux * Written 1992-94 by Donald Becker. * * 8390.c: A general NS8390 ethernet driver core for linux. * Written 1992-94 by Donald Becker. * * cnetdevice: A Sana-II ethernet driver for AmigaOS * Written by Bruce Abbott (bhabbott@inhb.co.nz) * * ---------------------------------------------------------------------------- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. * * ---------------------------------------------------------------------------- * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include <asm/amigayle.h> #include <asm/amipcmcia.h> #include "8390.h" /* ---- No user-serviceable parts below ---- */ #define DRV_NAME "apne" #define NE_BASE (dev->base_addr) #define NE_CMD 0x00 #define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ #define NE_RESET 0x1f /* Issue a read to reset, a write to clear. 
*/ #define NE_IO_EXTENT 0x20 #define NE_EN0_ISR 0x07 #define NE_EN0_DCFG 0x0e #define NE_EN0_RSARLO 0x08 #define NE_EN0_RSARHI 0x09 #define NE_EN0_RCNTLO 0x0a #define NE_EN0_RXCR 0x0c #define NE_EN0_TXCR 0x0d #define NE_EN0_RCNTHI 0x0b #define NE_EN0_IMR 0x0f #define NE1SM_START_PG 0x20 /* First page of TX buffer */ #define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ struct net_device * __init apne_probe(int unit); static int apne_probe1(struct net_device *dev, int ioaddr); static void apne_reset_8390(struct net_device *dev); static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void apne_block_output(struct net_device *dev, const int count, const unsigned char *buf, const int start_page); static irqreturn_t apne_interrupt(int irq, void *dev_id); static int init_pcmcia(void); /* IO base address used for nic */ #define IOBASE 0x300 /* use MANUAL_CONFIG and MANUAL_OFFSET for enabling IO by hand you can find the values to use by looking at the cnet.device config file example (the default values are for the CNET40BC card) */ /* #define MANUAL_CONFIG 0x20 #define MANUAL_OFFSET 0x3f8 #define MANUAL_HWADDR0 0x00 #define MANUAL_HWADDR1 0x12 #define MANUAL_HWADDR2 0x34 #define MANUAL_HWADDR3 0x56 #define MANUAL_HWADDR4 0x78 #define MANUAL_HWADDR5 0x9a */ static const char version[] = "apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n"; static int apne_owned; /* signal if card already owned */ struct net_device * __init apne_probe(int unit) { struct net_device *dev; #ifndef MANUAL_CONFIG char tuple[8]; #endif int err; if (!MACH_IS_AMIGA) return ERR_PTR(-ENODEV); if (apne_owned) return ERR_PTR(-ENODEV); if ( !(AMIGAHW_PRESENT(PCMCIA)) ) return ERR_PTR(-ENODEV); printk("Looking for PCMCIA ethernet card 
: "); /* check if a card is inserted */ if (!(PCMCIA_INSERTED)) { printk("NO PCMCIA card inserted\n"); return ERR_PTR(-ENODEV); } dev = alloc_ei_netdev(); if (!dev) return ERR_PTR(-ENOMEM); if (unit >= 0) { sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } /* disable pcmcia irq for readtuple */ pcmcia_disable_irq(); #ifndef MANUAL_CONFIG if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) || (tuple[2] != CISTPL_FUNCID_NETWORK)) { printk("not an ethernet card\n"); /* XXX: shouldn't we re-enable irq here? */ free_netdev(dev); return ERR_PTR(-ENODEV); } #endif printk("ethernet PCMCIA card inserted\n"); if (!init_pcmcia()) { /* XXX: shouldn't we re-enable irq here? */ free_netdev(dev); return ERR_PTR(-ENODEV); } if (!request_region(IOBASE, 0x20, DRV_NAME)) { free_netdev(dev); return ERR_PTR(-EBUSY); } err = apne_probe1(dev, IOBASE); if (err) { release_region(IOBASE, 0x20); free_netdev(dev); return ERR_PTR(err); } err = register_netdev(dev); if (!err) return dev; pcmcia_disable_irq(); free_irq(IRQ_AMIGA_PORTS, dev); pcmcia_reset(); release_region(IOBASE, 0x20); free_netdev(dev); return ERR_PTR(err); } static int __init apne_probe1(struct net_device *dev, int ioaddr) { int i; unsigned char SA_prom[32]; int wordlength = 2; const char *name = NULL; int start_page, stop_page; #ifndef MANUAL_HWADDR0 int neX000, ctron; #endif static unsigned version_printed; if (ei_debug && version_printed++ == 0) printk(version); printk("PCMCIA NE*000 ethercard probe"); /* Reset card. Who knows what dain-bramaged state it was left in. */ { unsigned long reset_start_time = jiffies; outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2*HZ/100)) { printk(" not found (no reset ack).\n"); return -ENODEV; } outb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ } #ifndef MANUAL_HWADDR0 /* Read the 16 bytes of station address PROM. 
We must first initialize registers, similar to NS8390_init(eifdev, 0). We can't reliably read the SAPROM address without this. (I learned the hard way!). */ { struct {unsigned long value, offset; } program_seq[] = { {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/ {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */ {0x00, NE_EN0_RCNTHI}, {0x00, NE_EN0_IMR}, /* Mask completion irq. */ {0xFF, NE_EN0_ISR}, {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */ {32, NE_EN0_RCNTLO}, {0x00, NE_EN0_RCNTHI}, {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */ {0x00, NE_EN0_RSARHI}, {E8390_RREAD+E8390_START, NE_CMD}, }; for (i = 0; i < ARRAY_SIZE(program_seq); i++) { outb(program_seq[i].value, ioaddr + program_seq[i].offset); } } for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { SA_prom[i] = inb(ioaddr + NE_DATAPORT); SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); if (SA_prom[i] != SA_prom[i+1]) wordlength = 1; } /* At this point, wordlength *only* tells us if the SA_prom is doubled up or not because some broken PCI cards don't respect the byte-wide request in program_seq above, and hence don't have doubled up values. These broken cards would otherwise be detected as an ne1000. */ if (wordlength == 2) for (i = 0; i < 16; i++) SA_prom[i] = SA_prom[i+i]; if (wordlength == 2) { /* We must set the 8390 for word mode. */ outb(0x49, ioaddr + NE_EN0_DCFG); start_page = NESM_START_PG; stop_page = NESM_STOP_PG; } else { start_page = NE1SM_START_PG; stop_page = NE1SM_STOP_PG; } neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); /* Set up the rest of the parameters. */ if (neX000) { name = (wordlength == 2) ? "NE2000" : "NE1000"; } else if (ctron) { name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; start_page = 0x01; stop_page = (wordlength == 2) ? 
0x40 : 0x20; } else { printk(" not found.\n"); return -ENXIO; } #else wordlength = 2; /* We must set the 8390 for word mode. */ outb(0x49, ioaddr + NE_EN0_DCFG); start_page = NESM_START_PG; stop_page = NESM_STOP_PG; SA_prom[0] = MANUAL_HWADDR0; SA_prom[1] = MANUAL_HWADDR1; SA_prom[2] = MANUAL_HWADDR2; SA_prom[3] = MANUAL_HWADDR3; SA_prom[4] = MANUAL_HWADDR4; SA_prom[5] = MANUAL_HWADDR5; name = "NE2000"; #endif dev->base_addr = ioaddr; dev->irq = IRQ_AMIGA_PORTS; dev->netdev_ops = &ei_netdev_ops; /* Install the Interrupt handler */ i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); printk("%s: %s found.\n", dev->name, name); ei_status.name = name; ei_status.tx_start_page = start_page; ei_status.stop_page = stop_page; ei_status.word16 = (wordlength == 2); ei_status.rx_start_page = start_page + TX_PAGES; ei_status.reset_8390 = &apne_reset_8390; ei_status.block_input = &apne_block_input; ei_status.block_output = &apne_block_output; ei_status.get_8390_hdr = &apne_get_8390_hdr; NS8390_init(dev, 0); pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */ pcmcia_enable_irq(); apne_owned = 1; return 0; } /* Hard reset the card. This used to pause for the same period that a 8390 reset command required, but that shouldn't be necessary. */ static void apne_reset_8390(struct net_device *dev) { unsigned long reset_start_time = jiffies; init_pcmcia(); if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies); outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); ei_status.txing = 0; ei_status.dmaing = 0; /* This check _should_not_ be necessary, omit eventually. */ while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2*HZ/100)) { printk("%s: ne_reset_8390() did not complete.\n", dev->name); break; } outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */ } /* Grab the 8390 specific header. 
Similar to the block_input routine, but we don't need to be concerned with ring wrap as the header will be at the start of a page, so we optimize accordingly. */ static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { int nic_base = dev->base_addr; int cnt; char *ptrc; short *ptrs; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne_get_8390_hdr " "[DMAstat:%d][irqlock:%d][intr:%d].\n", dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); return; } ei_status.dmaing |= 0x01; outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); outb(ENISR_RDC, nic_base + NE_EN0_ISR); outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); outb(0, nic_base + NE_EN0_RCNTHI); outb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ outb(ring_page, nic_base + NE_EN0_RSARHI); outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_status.word16) { ptrs = (short*)hdr; for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++) *ptrs++ = inw(NE_BASE + NE_DATAPORT); } else { ptrc = (char*)hdr; for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++) *ptrc++ = inb(NE_BASE + NE_DATAPORT); } outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; le16_to_cpus(&hdr->count); } /* Block input and output, similar to the Crynwr packet driver. If you are porting to a new ethercard, look at the packet driver source for hints. The NEx000 doesn't share the on-board packet memory -- you have to put the packet out through the "remote DMA" dataport using outb. */ static void apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { int nic_base = dev->base_addr; char *buf = skb->data; char *ptrc; short *ptrs; int cnt; /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne_block_input " "[DMAstat:%d][irqlock:%d][intr:%d].\n", dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); return; } ei_status.dmaing |= 0x01; outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); outb(ENISR_RDC, nic_base + NE_EN0_ISR); outb(count & 0xff, nic_base + NE_EN0_RCNTLO); outb(count >> 8, nic_base + NE_EN0_RCNTHI); outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_status.word16) { ptrs = (short*)buf; for (cnt = 0; cnt < (count>>1); cnt++) *ptrs++ = inw(NE_BASE + NE_DATAPORT); if (count & 0x01) { buf[count-1] = inb(NE_BASE + NE_DATAPORT); } } else { ptrc = (char*)buf; for (cnt = 0; cnt < count; cnt++) *ptrc++ = inb(NE_BASE + NE_DATAPORT); } outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; } static void apne_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { int nic_base = NE_BASE; unsigned long dma_start; char *ptrc; short *ptrs; int cnt; /* Round the count up for word writes. Do we need to do this? What effect will an odd byte count have on the 8390? I should check someday. */ if (ei_status.word16 && (count & 0x01)) count++; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne_block_output." "[DMAstat:%d][irqlock:%d][intr:%d]\n", dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); return; } ei_status.dmaing |= 0x01; /* We should already be in page 0, but to be safe... */ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Now the normal output. 
*/ outb(count & 0xff, nic_base + NE_EN0_RCNTLO); outb(count >> 8, nic_base + NE_EN0_RCNTHI); outb(0x00, nic_base + NE_EN0_RSARLO); outb(start_page, nic_base + NE_EN0_RSARHI); outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); if (ei_status.word16) { ptrs = (short*)buf; for (cnt = 0; cnt < count>>1; cnt++) outw(*ptrs++, NE_BASE+NE_DATAPORT); } else { ptrc = (char*)buf; for (cnt = 0; cnt < count; cnt++) outb(*ptrc++, NE_BASE + NE_DATAPORT); } dma_start = jiffies; while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ printk("%s: timeout waiting for Tx RDC.\n", dev->name); apne_reset_8390(dev); NS8390_init(dev,1); break; } outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; } static irqreturn_t apne_interrupt(int irq, void *dev_id) { unsigned char pcmcia_intreq; if (!(gayle.inten & GAYLE_IRQ_IRQ)) return IRQ_NONE; pcmcia_intreq = pcmcia_get_intreq(); if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) { pcmcia_ack_int(pcmcia_intreq); return IRQ_NONE; } if (ei_debug > 3) printk("pcmcia intreq = %x\n", pcmcia_intreq); pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */ ei_interrupt(irq, dev_id); pcmcia_ack_int(pcmcia_get_intreq()); pcmcia_enable_irq(); return IRQ_HANDLED; } #ifdef MODULE static struct net_device *apne_dev; static int __init apne_module_init(void) { apne_dev = apne_probe(-1); if (IS_ERR(apne_dev)) return PTR_ERR(apne_dev); return 0; } static void __exit apne_module_exit(void) { unregister_netdev(apne_dev); pcmcia_disable_irq(); free_irq(IRQ_AMIGA_PORTS, apne_dev); pcmcia_reset(); release_region(IOBASE, 0x20); free_netdev(apne_dev); } module_init(apne_module_init); module_exit(apne_module_exit); #endif static int init_pcmcia(void) { u_char config; #ifndef MANUAL_CONFIG u_char tuple[32]; int offset_len; #endif u_long offset; pcmcia_reset(); pcmcia_program_voltage(PCMCIA_0V); pcmcia_access_speed(PCMCIA_SPEED_250NS); pcmcia_write_enable(); #ifdef MANUAL_CONFIG 
config = MANUAL_CONFIG; #else /* get and write config byte to enable IO port */ if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3) return 0; config = tuple[2] & 0x3f; #endif #ifdef MANUAL_OFFSET offset = MANUAL_OFFSET; #else if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6) return 0; offset_len = (tuple[2] & 0x3) + 1; offset = 0; while(offset_len--) { offset = (offset << 8) | tuple[4+offset_len]; } #endif out_8(GAYLE_ATTRIBUTE+offset, config); return 1; } MODULE_LICENSE("GPL");
gpl-2.0
GalaxyTab4/android_kernel_motorola_msm8226
drivers/leds/leds-cobalt-qube.c
5112
2021
/*
 * Copyright 2006 - Florian Fainelli <florian@openwrt.org>
 *
 * Control the Cobalt Qube/RaQ front LED
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>

/* Bits selecting the two front LEDs in the LED control register. */
#define LED_FRONT_LEFT	0x01
#define LED_FRONT_RIGHT	0x02

static void __iomem *led_port;	/* mapped LED control register */
static u8 led_value;		/* last value written to the register */

/*
 * Brightness callback for the LED core: any non-zero brightness lights
 * both front LEDs; zero writes the complement of the LED bits to clear
 * them.  The chosen value is latched in led_value and pushed to the
 * hardware register.
 */
static void qube_front_led_set(struct led_classdev *led_cdev,
			       enum led_brightness brightness)
{
	led_value = brightness ? (LED_FRONT_LEFT | LED_FRONT_RIGHT)
			       : ~(LED_FRONT_LEFT | LED_FRONT_RIGHT);
	writeb(led_value, led_port);
}

static struct led_classdev qube_front_led = {
	.name			= "qube::front",
	.brightness		= LED_FULL,
	.brightness_set		= qube_front_led_set,
	.default_trigger	= "default-on",
};

/*
 * Probe: map the LED register described by the platform MEM resource,
 * switch both front LEDs on, then register the classdev with the LED
 * core.  On registration failure the mapping is torn down again.
 */
static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EBUSY;

	led_port = ioremap(res->start, resource_size(res));
	if (!led_port)
		return -ENOMEM;

	/* Light both LEDs before handing control to the LED core. */
	led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT;
	writeb(led_value, led_port);

	ret = led_classdev_register(&pdev->dev, &qube_front_led);
	if (ret) {
		iounmap(led_port);
		led_port = NULL;
		return ret;
	}

	return 0;
}

/*
 * Remove: unregister from the LED core and release the register
 * mapping, clearing led_port so a stale pointer is never reused.
 */
static int __devexit cobalt_qube_led_remove(struct platform_device *pdev)
{
	led_classdev_unregister(&qube_front_led);

	if (led_port) {
		iounmap(led_port);
		led_port = NULL;
	}

	return 0;
}

static struct platform_driver cobalt_qube_led_driver = {
	.probe	= cobalt_qube_led_probe,
	.remove	= __devexit_p(cobalt_qube_led_remove),
	.driver	= {
		.name	= "cobalt-qube-leds",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(cobalt_qube_led_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Front LED support for Cobalt Server");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_ALIAS("platform:cobalt-qube-leds");
gpl-2.0
3EleVen/kernel_common
drivers/i2c/busses/i2c-bfin-twi.c
5112
20431
/* * Blackfin On-Chip Two Wire Interface Driver * * Copyright 2005-2007 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <asm/blackfin.h> #include <asm/portmux.h> #include <asm/irq.h> /* SMBus mode*/ #define TWI_I2C_MODE_STANDARD 1 #define TWI_I2C_MODE_STANDARDSUB 2 #define TWI_I2C_MODE_COMBINED 3 #define TWI_I2C_MODE_REPEAT 4 struct bfin_twi_iface { int irq; spinlock_t lock; char read_write; u8 command; u8 *transPtr; int readNum; int writeNum; int cur_mode; int manual_stop; int result; struct i2c_adapter adap; struct completion complete; struct i2c_msg *pmsg; int msg_num; int cur_msg; u16 saved_clkdiv; u16 saved_control; void __iomem *regs_base; }; #define DEFINE_TWI_REG(reg, off) \ static inline u16 read_##reg(struct bfin_twi_iface *iface) \ { return bfin_read16(iface->regs_base + (off)); } \ static inline void write_##reg(struct bfin_twi_iface *iface, u16 v) \ { bfin_write16(iface->regs_base + (off), v); } DEFINE_TWI_REG(CLKDIV, 0x00) DEFINE_TWI_REG(CONTROL, 0x04) DEFINE_TWI_REG(SLAVE_CTL, 0x08) DEFINE_TWI_REG(SLAVE_STAT, 0x0C) DEFINE_TWI_REG(SLAVE_ADDR, 0x10) DEFINE_TWI_REG(MASTER_CTL, 0x14) DEFINE_TWI_REG(MASTER_STAT, 0x18) DEFINE_TWI_REG(MASTER_ADDR, 0x1C) DEFINE_TWI_REG(INT_STAT, 0x20) DEFINE_TWI_REG(INT_MASK, 0x24) DEFINE_TWI_REG(FIFO_CTL, 0x28) DEFINE_TWI_REG(FIFO_STAT, 0x2C) DEFINE_TWI_REG(XMT_DATA8, 0x80) DEFINE_TWI_REG(XMT_DATA16, 0x84) DEFINE_TWI_REG(RCV_DATA8, 0x88) DEFINE_TWI_REG(RCV_DATA16, 0x8C) static const u16 pin_req[2][3] = { {P_TWI0_SCL, P_TWI0_SDA, 0}, {P_TWI1_SCL, P_TWI1_SDA, 0}, }; static void bfin_twi_handle_interrupt(struct bfin_twi_iface 
*iface, unsigned short twi_int_status) { unsigned short mast_stat = read_MASTER_STAT(iface); if (twi_int_status & XMTSERV) { /* Transmit next data */ if (iface->writeNum > 0) { SSYNC(); write_XMT_DATA8(iface, *(iface->transPtr++)); iface->writeNum--; } /* start receive immediately after complete sending in * combine mode. */ else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MDIR | RSTART); else if (iface->manual_stop) write_MASTER_CTL(iface, read_MASTER_CTL(iface) | STOP); else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && iface->cur_msg + 1 < iface->msg_num) { if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) write_MASTER_CTL(iface, read_MASTER_CTL(iface) | RSTART | MDIR); else write_MASTER_CTL(iface, (read_MASTER_CTL(iface) | RSTART) & ~MDIR); } } if (twi_int_status & RCVSERV) { if (iface->readNum > 0) { /* Receive next data */ *(iface->transPtr) = read_RCV_DATA8(iface); if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { /* Change combine mode into sub mode after * read first data. */ iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; /* Get read number from first byte in block * combine mode. 
*/ if (iface->readNum == 1 && iface->manual_stop) iface->readNum = *iface->transPtr + 1; } iface->transPtr++; iface->readNum--; } else if (iface->manual_stop) { write_MASTER_CTL(iface, read_MASTER_CTL(iface) | STOP); } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && iface->cur_msg + 1 < iface->msg_num) { if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) write_MASTER_CTL(iface, read_MASTER_CTL(iface) | RSTART | MDIR); else write_MASTER_CTL(iface, (read_MASTER_CTL(iface) | RSTART) & ~MDIR); } } if (twi_int_status & MERR) { write_INT_MASK(iface, 0); write_MASTER_STAT(iface, 0x3e); write_MASTER_CTL(iface, 0); iface->result = -EIO; if (mast_stat & LOSTARB) dev_dbg(&iface->adap.dev, "Lost Arbitration\n"); if (mast_stat & ANAK) dev_dbg(&iface->adap.dev, "Address Not Acknowledged\n"); if (mast_stat & DNAK) dev_dbg(&iface->adap.dev, "Data Not Acknowledged\n"); if (mast_stat & BUFRDERR) dev_dbg(&iface->adap.dev, "Buffer Read Error\n"); if (mast_stat & BUFWRERR) dev_dbg(&iface->adap.dev, "Buffer Write Error\n"); /* Faulty slave devices, may drive SDA low after a transfer * finishes. To release the bus this code generates up to 9 * extra clocks until SDA is released. */ if (read_MASTER_STAT(iface) & SDASEN) { int cnt = 9; do { write_MASTER_CTL(iface, SCLOVR); udelay(6); write_MASTER_CTL(iface, 0); udelay(6); } while ((read_MASTER_STAT(iface) & SDASEN) && cnt--); write_MASTER_CTL(iface, SDAOVR | SCLOVR); udelay(6); write_MASTER_CTL(iface, SDAOVR); udelay(6); write_MASTER_CTL(iface, 0); } /* If it is a quick transfer, only address without data, * not an err, return 1. 
*/ if (iface->cur_mode == TWI_I2C_MODE_STANDARD && iface->transPtr == NULL && (twi_int_status & MCOMP) && (mast_stat & DNAK)) iface->result = 1; complete(&iface->complete); return; } if (twi_int_status & MCOMP) { if ((read_MASTER_CTL(iface) & MEN) == 0 && (iface->cur_mode == TWI_I2C_MODE_REPEAT || iface->cur_mode == TWI_I2C_MODE_COMBINED)) { iface->result = -1; write_INT_MASK(iface, 0); write_MASTER_CTL(iface, 0); } else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { if (iface->readNum == 0) { /* set the read number to 1 and ask for manual * stop in block combine mode */ iface->readNum = 1; iface->manual_stop = 1; write_MASTER_CTL(iface, read_MASTER_CTL(iface) | (0xff << 6)); } else { /* set the readd number in other * combine mode. */ write_MASTER_CTL(iface, (read_MASTER_CTL(iface) & (~(0xff << 6))) | (iface->readNum << 6)); } /* remove restart bit and enable master receive */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) & ~RSTART); } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && iface->cur_msg+1 < iface->msg_num) { iface->cur_msg++; iface->transPtr = iface->pmsg[iface->cur_msg].buf; iface->writeNum = iface->readNum = iface->pmsg[iface->cur_msg].len; /* Set Transmit device address */ write_MASTER_ADDR(iface, iface->pmsg[iface->cur_msg].addr); if (iface->pmsg[iface->cur_msg].flags & I2C_M_RD) iface->read_write = I2C_SMBUS_READ; else { iface->read_write = I2C_SMBUS_WRITE; /* Transmit first data */ if (iface->writeNum > 0) { write_XMT_DATA8(iface, *(iface->transPtr++)); iface->writeNum--; } } if (iface->pmsg[iface->cur_msg].len <= 255) write_MASTER_CTL(iface, (read_MASTER_CTL(iface) & (~(0xff << 6))) | (iface->pmsg[iface->cur_msg].len << 6)); else { write_MASTER_CTL(iface, (read_MASTER_CTL(iface) | (0xff << 6))); iface->manual_stop = 1; } /* remove restart bit and enable master receive */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) & ~RSTART); } else { iface->result = 1; write_INT_MASK(iface, 0); write_MASTER_CTL(iface, 0); } } complete(&iface->complete); 
} /* Interrupt handler */ static irqreturn_t bfin_twi_interrupt_entry(int irq, void *dev_id) { struct bfin_twi_iface *iface = dev_id; unsigned long flags; unsigned short twi_int_status; spin_lock_irqsave(&iface->lock, flags); while (1) { twi_int_status = read_INT_STAT(iface); if (!twi_int_status) break; /* Clear interrupt status */ write_INT_STAT(iface, twi_int_status); bfin_twi_handle_interrupt(iface, twi_int_status); SSYNC(); } spin_unlock_irqrestore(&iface->lock, flags); return IRQ_HANDLED; } /* * One i2c master transfer */ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct bfin_twi_iface *iface = adap->algo_data; struct i2c_msg *pmsg; int rc = 0; if (!(read_CONTROL(iface) & TWI_ENA)) return -ENXIO; while (read_MASTER_STAT(iface) & BUSBUSY) yield(); iface->pmsg = msgs; iface->msg_num = num; iface->cur_msg = 0; pmsg = &msgs[0]; if (pmsg->flags & I2C_M_TEN) { dev_err(&adap->dev, "10 bits addr not supported!\n"); return -EINVAL; } iface->cur_mode = TWI_I2C_MODE_REPEAT; iface->manual_stop = 0; iface->transPtr = pmsg->buf; iface->writeNum = iface->readNum = pmsg->len; iface->result = 0; init_completion(&(iface->complete)); /* Set Transmit device address */ write_MASTER_ADDR(iface, pmsg->addr); /* FIFO Initiation. Data in FIFO should be * discarded before start a new operation. */ write_FIFO_CTL(iface, 0x3); SSYNC(); write_FIFO_CTL(iface, 0); SSYNC(); if (pmsg->flags & I2C_M_RD) iface->read_write = I2C_SMBUS_READ; else { iface->read_write = I2C_SMBUS_WRITE; /* Transmit first data */ if (iface->writeNum > 0) { write_XMT_DATA8(iface, *(iface->transPtr++)); iface->writeNum--; SSYNC(); } } /* clear int stat */ write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV); /* Interrupt mask . 
Enable XMT, RCV interrupt */ write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV); SSYNC(); if (pmsg->len <= 255) write_MASTER_CTL(iface, pmsg->len << 6); else { write_MASTER_CTL(iface, 0xff << 6); iface->manual_stop = 1; } /* Master enable */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0)); SSYNC(); while (!iface->result) { if (!wait_for_completion_timeout(&iface->complete, adap->timeout)) { iface->result = -1; dev_err(&adap->dev, "master transfer timeout\n"); } } if (iface->result == 1) rc = iface->cur_msg + 1; else rc = iface->result; return rc; } /* * Generic i2c master transfer entrypoint */ static int bfin_twi_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return bfin_twi_do_master_xfer(adap, msgs, num); } /* * One I2C SMBus transfer */ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct bfin_twi_iface *iface = adap->algo_data; int rc = 0; if (!(read_CONTROL(iface) & TWI_ENA)) return -ENXIO; while (read_MASTER_STAT(iface) & BUSBUSY) yield(); iface->writeNum = 0; iface->readNum = 0; /* Prepare datas & select mode */ switch (size) { case I2C_SMBUS_QUICK: iface->transPtr = NULL; iface->cur_mode = TWI_I2C_MODE_STANDARD; break; case I2C_SMBUS_BYTE: if (data == NULL) iface->transPtr = NULL; else { if (read_write == I2C_SMBUS_READ) iface->readNum = 1; else iface->writeNum = 1; iface->transPtr = &data->byte; } iface->cur_mode = TWI_I2C_MODE_STANDARD; break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_READ) { iface->readNum = 1; iface->cur_mode = TWI_I2C_MODE_COMBINED; } else { iface->writeNum = 1; iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; } iface->transPtr = &data->byte; break; case I2C_SMBUS_WORD_DATA: if (read_write == I2C_SMBUS_READ) { iface->readNum = 2; iface->cur_mode = TWI_I2C_MODE_COMBINED; } else { 
iface->writeNum = 2; iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; } iface->transPtr = (u8 *)&data->word; break; case I2C_SMBUS_PROC_CALL: iface->writeNum = 2; iface->readNum = 2; iface->cur_mode = TWI_I2C_MODE_COMBINED; iface->transPtr = (u8 *)&data->word; break; case I2C_SMBUS_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { iface->readNum = 0; iface->cur_mode = TWI_I2C_MODE_COMBINED; } else { iface->writeNum = data->block[0] + 1; iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; } iface->transPtr = data->block; break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { iface->readNum = data->block[0]; iface->cur_mode = TWI_I2C_MODE_COMBINED; } else { iface->writeNum = data->block[0]; iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; } iface->transPtr = (u8 *)&data->block[1]; break; default: return -1; } iface->result = 0; iface->manual_stop = 0; iface->read_write = read_write; iface->command = command; init_completion(&(iface->complete)); /* FIFO Initiation. Data in FIFO should be discarded before * start a new operation. */ write_FIFO_CTL(iface, 0x3); SSYNC(); write_FIFO_CTL(iface, 0); /* clear int stat */ write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV); /* Set Transmit device address */ write_MASTER_ADDR(iface, addr); SSYNC(); switch (iface->cur_mode) { case TWI_I2C_MODE_STANDARDSUB: write_XMT_DATA8(iface, iface->command); write_INT_MASK(iface, MCOMP | MERR | ((iface->read_write == I2C_SMBUS_READ) ? RCVSERV : XMTSERV)); SSYNC(); if (iface->writeNum + 1 <= 255) write_MASTER_CTL(iface, (iface->writeNum + 1) << 6); else { write_MASTER_CTL(iface, 0xff << 6); iface->manual_stop = 1; } /* Master enable */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? 
FAST : 0)); break; case TWI_I2C_MODE_COMBINED: write_XMT_DATA8(iface, iface->command); write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV); SSYNC(); if (iface->writeNum > 0) write_MASTER_CTL(iface, (iface->writeNum + 1) << 6); else write_MASTER_CTL(iface, 0x1 << 6); /* Master enable */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); break; default: write_MASTER_CTL(iface, 0); if (size != I2C_SMBUS_QUICK) { /* Don't access xmit data register when this is a * read operation. */ if (iface->read_write != I2C_SMBUS_READ) { if (iface->writeNum > 0) { write_XMT_DATA8(iface, *(iface->transPtr++)); if (iface->writeNum <= 255) write_MASTER_CTL(iface, iface->writeNum << 6); else { write_MASTER_CTL(iface, 0xff << 6); iface->manual_stop = 1; } iface->writeNum--; } else { write_XMT_DATA8(iface, iface->command); write_MASTER_CTL(iface, 1 << 6); } } else { if (iface->readNum > 0 && iface->readNum <= 255) write_MASTER_CTL(iface, iface->readNum << 6); else if (iface->readNum > 255) { write_MASTER_CTL(iface, 0xff << 6); iface->manual_stop = 1; } else break; } } write_INT_MASK(iface, MCOMP | MERR | ((iface->read_write == I2C_SMBUS_READ) ? RCVSERV : XMTSERV)); SSYNC(); /* Master enable */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0)); break; } SSYNC(); while (!iface->result) { if (!wait_for_completion_timeout(&iface->complete, adap->timeout)) { iface->result = -1; dev_err(&adap->dev, "smbus transfer timeout\n"); } } rc = (iface->result >= 0) ? 
0 : -1; return rc; } /* * Generic I2C SMBus transfer entrypoint */ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { return bfin_twi_do_smbus_xfer(adap, addr, flags, read_write, command, size, data); } /* * Return what the adapter supports */ static u32 bfin_twi_functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK; } static struct i2c_algorithm bfin_twi_algorithm = { .master_xfer = bfin_twi_master_xfer, .smbus_xfer = bfin_twi_smbus_xfer, .functionality = bfin_twi_functionality, }; static int i2c_bfin_twi_suspend(struct platform_device *pdev, pm_message_t state) { struct bfin_twi_iface *iface = platform_get_drvdata(pdev); iface->saved_clkdiv = read_CLKDIV(iface); iface->saved_control = read_CONTROL(iface); free_irq(iface->irq, iface); /* Disable TWI */ write_CONTROL(iface, iface->saved_control & ~TWI_ENA); return 0; } static int i2c_bfin_twi_resume(struct platform_device *pdev) { struct bfin_twi_iface *iface = platform_get_drvdata(pdev); int rc = request_irq(iface->irq, bfin_twi_interrupt_entry, 0, pdev->name, iface); if (rc) { dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq); return -ENODEV; } /* Resume TWI interface clock as specified */ write_CLKDIV(iface, iface->saved_clkdiv); /* Resume TWI */ write_CONTROL(iface, iface->saved_control); return 0; } static int i2c_bfin_twi_probe(struct platform_device *pdev) { struct bfin_twi_iface *iface; struct i2c_adapter *p_adap; struct resource *res; int rc; unsigned int clkhilow; iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL); if (!iface) { dev_err(&pdev->dev, "Cannot allocate memory\n"); rc = -ENOMEM; goto out_error_nomem; } spin_lock_init(&(iface->lock)); /* Find and map our resources */ res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); rc = -ENOENT; goto out_error_get_res; } iface->regs_base = ioremap(res->start, resource_size(res)); if (iface->regs_base == NULL) { dev_err(&pdev->dev, "Cannot map IO\n"); rc = -ENXIO; goto out_error_ioremap; } iface->irq = platform_get_irq(pdev, 0); if (iface->irq < 0) { dev_err(&pdev->dev, "No IRQ specified\n"); rc = -ENOENT; goto out_error_no_irq; } p_adap = &iface->adap; p_adap->nr = pdev->id; strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); p_adap->algo = &bfin_twi_algorithm; p_adap->algo_data = iface; p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; p_adap->dev.parent = &pdev->dev; p_adap->timeout = 5 * HZ; p_adap->retries = 3; rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi"); if (rc) { dev_err(&pdev->dev, "Can't setup pin mux!\n"); goto out_error_pin_mux; } rc = request_irq(iface->irq, bfin_twi_interrupt_entry, 0, pdev->name, iface); if (rc) { dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq); rc = -ENODEV; goto out_error_req_irq; } /* Set TWI internal clock as 10MHz */ write_CONTROL(iface, ((get_sclk() / 1000 / 1000 + 5) / 10) & 0x7F); /* * We will not end up with a CLKDIV=0 because no one will specify * 20kHz SCL or less in Kconfig now. 
(5 * 1000 / 20 = 250) */ clkhilow = ((10 * 1000 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) + 1) / 2; /* Set Twi interface clock as specified */ write_CLKDIV(iface, (clkhilow << 8) | clkhilow); /* Enable TWI */ write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA); SSYNC(); rc = i2c_add_numbered_adapter(p_adap); if (rc < 0) { dev_err(&pdev->dev, "Can't add i2c adapter!\n"); goto out_error_add_adapter; } platform_set_drvdata(pdev, iface); dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " "regs_base@%p\n", iface->regs_base); return 0; out_error_add_adapter: free_irq(iface->irq, iface); out_error_req_irq: out_error_no_irq: peripheral_free_list(pin_req[pdev->id]); out_error_pin_mux: iounmap(iface->regs_base); out_error_ioremap: out_error_get_res: kfree(iface); out_error_nomem: return rc; } static int i2c_bfin_twi_remove(struct platform_device *pdev) { struct bfin_twi_iface *iface = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); i2c_del_adapter(&(iface->adap)); free_irq(iface->irq, iface); peripheral_free_list(pin_req[pdev->id]); iounmap(iface->regs_base); kfree(iface); return 0; } static struct platform_driver i2c_bfin_twi_driver = { .probe = i2c_bfin_twi_probe, .remove = i2c_bfin_twi_remove, .suspend = i2c_bfin_twi_suspend, .resume = i2c_bfin_twi_resume, .driver = { .name = "i2c-bfin-twi", .owner = THIS_MODULE, }, }; static int __init i2c_bfin_twi_init(void) { return platform_driver_register(&i2c_bfin_twi_driver); } static void __exit i2c_bfin_twi_exit(void) { platform_driver_unregister(&i2c_bfin_twi_driver); } subsys_initcall(i2c_bfin_twi_init); module_exit(i2c_bfin_twi_exit); MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-bfin-twi");
gpl-2.0
crewrktablets/rk29_kernel_2.6
drivers/media/video/zoran/zr36016.c
9720
14206
/* * Zoran ZR36016 basic configuration functions * * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at> * * $Id: zr36016.c,v 1.1.2.14 2003/08/20 19:46:55 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR016_VERSION "v0.7" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* v4l API */ /* headerfile of this module */ #include"zr36016.h" /* codec io API */ #include"videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36016_codecs; /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36016_read (struct zr36016 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data-> readreg(ptr->codec, reg)) & 0xFF; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_write (struct zr36016 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, reg, value); } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* indirect read and write functions */ /* the 016 supports auto-addr-increment, but * writing it all time cost not much and is safer... */ static u8 zr36016_readi (struct zr36016 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read (i)!\n", ptr->name); dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_writei (struct zr36016 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... 
if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written (i)!\n", ptr->name); } /* ========================================================================= Local helper function: version read ========================================================================= */ /* version kept in datastructure */ static u8 zr36016_read_version (struct zr36016 *ptr) { ptr->version = zr36016_read(ptr, 0) >> 4; return ptr->version; } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from PAX-Lo register ========================================================================= */ static int zr36016_basic_test (struct zr36016 *ptr) { if (debug) { int i; zr36016_writei(ptr, ZR016I_PAX_LO, 0x55); dprintk(1, KERN_INFO "%s: registers: ", ptr->name); for (i = 0; i <= 0x0b; i++) dprintk(1, "%02x ", zr36016_readi(ptr, i)); dprintk(1, "\n"); } // for testing just write 0, then the default value to a register and read // it back in both cases zr36016_writei(ptr, ZR016I_PAX_LO, 0x00); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } // we allow version numbers from 0-3, should be enough, though zr36016_read_version(ptr); if (ptr->version & 0x0c) { dprintk(1, KERN_ERR "%s: attach failed, suspicious version %d found...\n", ptr->name, ptr->version); return -ENXIO; } return 0; /* looks good! 
*/ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets - NO USE -- ========================================================================= */ #if 0 static int zr36016_pushit (struct zr36016 *ptr, u16 startreg, u16 len, const char *data) { int i=0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg,len); while (i<len) { zr36016_writei(ptr, startreg++, data[i++]); } return i; } #endif /* ========================================================================= Basic datasets & init: //TODO// ========================================================================= */ // needed offset values PAL NTSC SECAM static const int zr016_xoff[] = { 20, 20, 20 }; static const int zr016_yoff[] = { 8, 9, 7 }; static void zr36016_init (struct zr36016 *ptr) { // stop any processing zr36016_write(ptr, ZR016_GOSTOP, 0); // mode setup (yuv422 in and out, compression/expansuon due to mode) zr36016_write(ptr, ZR016_MODE, ZR016_YUV422 | ZR016_YUV422_YUV422 | (ptr->mode == CODEC_DO_COMPRESSION ? ZR016_COMPRESSION : ZR016_EXPANSION)); // misc setup zr36016_writei(ptr, ZR016I_SETUP1, (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) | (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI); zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR); // Window setup // (no extra offset for now, norm defines offset, default width height) zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8); zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF); zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8); zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF); zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8); zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF); zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8); zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF); /* shall we continue now, please? 
*/ zr36016_write(ptr, ZR016_GOSTOP, 1); } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36016_set_mode (struct videocodec *codec, int mode) { struct zr36016 *ptr = (struct zr36016 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36016_init(ptr); return 0; } /* set picture size */ static int zr36016_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36016 *ptr = (struct zr36016 *) codec->data; dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", ptr->name, norm->HStart, norm->VStart, cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y for now ... */ ptr->width = cap->width; ptr->height = cap->height; /* (Ronald) This is ugly. zoran_device.c, line 387 * already mentions what happens if HStart is even * (blue faces, etc., cr/cb inversed). There's probably * some good reason why HStart is 0 instead of 1, so I'm * leaving it to this for now, but really... This can be * done a lot simpler */ ptr->xoff = (norm->HStart ? norm->HStart : 1) + cap->x; /* Something to note here (I don't understand it), setting * VStart too high will cause the codec to 'not work'. I * really don't get it. values of 16 (VStart) already break * it here. Just '0' seems to work. More testing needed! */ ptr->yoff = norm->VStart + cap->y; /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */ ptr->xdec = ((cap->decimation & 0xff) == 1) ? 
0 : 1; ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1; return 0; } /* additional control functions */ static int zr36016_control (struct videocodec *codec, int type, int size, void *data) { struct zr36016 *ptr = (struct zr36016 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status - we don't know it ... */ if (size != sizeof(int)) return -EFAULT; *ival = 0; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = 0; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != 0) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36016_unset (struct videocodec *codec) { struct zr36016 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36016_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) 
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36016_setup (struct videocodec *codec) { struct zr36016 *ptr; int res; dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs); if (zr36016_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36016: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]", zr36016_codecs); ptr->num = zr36016_codecs++; ptr->codec = codec; //testing res = zr36016_basic_test(ptr); if (res < 0) { zr36016_unset(codec); return res; } //final setup ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 768; ptr->height = 288; ptr->xdec = 1; ptr->ydec = 0; zr36016_init(ptr); dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", ptr->name, ptr->version); return 0; } static const struct videocodec zr36016_codec = { .owner = THIS_MODULE, .name = "zr36016", .magic = 0L, // magic not used .flags = CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER, .type = CODEC_TYPE_ZR36016, .setup = zr36016_setup, // functionality .unset = zr36016_unset, .set_mode = zr36016_set_mode, .set_video = zr36016_set_video, .control = zr36016_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36016_init_module (void) { //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION); zr36016_codecs = 0; return videocodec_register(&zr36016_codec); } static void __exit zr36016_cleanup_module (void) { if (zr36016_codecs) { dprintk(1, "zr36016: something's wrong - %d codecs left somehow.\n", zr36016_codecs); } 
videocodec_unregister(&zr36016_codec); } module_init(zr36016_init_module); module_exit(zr36016_cleanup_module); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Driver module for ZR36016 video frontends " ZR016_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
cameron581/lge-kernel-gproj
drivers/media/video/zoran/zr36016.c
9720
14206
/* * Zoran ZR36016 basic configuration functions * * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at> * * $Id: zr36016.c,v 1.1.2.14 2003/08/20 19:46:55 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR016_VERSION "v0.7" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* v4l API */ /* headerfile of this module */ #include"zr36016.h" /* codec io API */ #include"videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36016_codecs; /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36016_read (struct zr36016 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data-> readreg(ptr->codec, reg)) & 0xFF; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_write (struct zr36016 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, reg, value); } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* indirect read and write functions */ /* the 016 supports auto-addr-increment, but * writing it all time cost not much and is safer... */ static u8 zr36016_readi (struct zr36016 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read (i)!\n", ptr->name); dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_writei (struct zr36016 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... 
if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written (i)!\n", ptr->name); } /* ========================================================================= Local helper function: version read ========================================================================= */ /* version kept in datastructure */ static u8 zr36016_read_version (struct zr36016 *ptr) { ptr->version = zr36016_read(ptr, 0) >> 4; return ptr->version; } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from PAX-Lo register ========================================================================= */ static int zr36016_basic_test (struct zr36016 *ptr) { if (debug) { int i; zr36016_writei(ptr, ZR016I_PAX_LO, 0x55); dprintk(1, KERN_INFO "%s: registers: ", ptr->name); for (i = 0; i <= 0x0b; i++) dprintk(1, "%02x ", zr36016_readi(ptr, i)); dprintk(1, "\n"); } // for testing just write 0, then the default value to a register and read // it back in both cases zr36016_writei(ptr, ZR016I_PAX_LO, 0x00); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } // we allow version numbers from 0-3, should be enough, though zr36016_read_version(ptr); if (ptr->version & 0x0c) { dprintk(1, KERN_ERR "%s: attach failed, suspicious version %d found...\n", ptr->name, ptr->version); return -ENXIO; } return 0; /* looks good! 
*/ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets - NO USE -- ========================================================================= */ #if 0 static int zr36016_pushit (struct zr36016 *ptr, u16 startreg, u16 len, const char *data) { int i=0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg,len); while (i<len) { zr36016_writei(ptr, startreg++, data[i++]); } return i; } #endif /* ========================================================================= Basic datasets & init: //TODO// ========================================================================= */ // needed offset values PAL NTSC SECAM static const int zr016_xoff[] = { 20, 20, 20 }; static const int zr016_yoff[] = { 8, 9, 7 }; static void zr36016_init (struct zr36016 *ptr) { // stop any processing zr36016_write(ptr, ZR016_GOSTOP, 0); // mode setup (yuv422 in and out, compression/expansuon due to mode) zr36016_write(ptr, ZR016_MODE, ZR016_YUV422 | ZR016_YUV422_YUV422 | (ptr->mode == CODEC_DO_COMPRESSION ? ZR016_COMPRESSION : ZR016_EXPANSION)); // misc setup zr36016_writei(ptr, ZR016I_SETUP1, (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) | (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI); zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR); // Window setup // (no extra offset for now, norm defines offset, default width height) zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8); zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF); zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8); zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF); zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8); zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF); zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8); zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF); /* shall we continue now, please? 
*/ zr36016_write(ptr, ZR016_GOSTOP, 1); } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36016_set_mode (struct videocodec *codec, int mode) { struct zr36016 *ptr = (struct zr36016 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36016_init(ptr); return 0; } /* set picture size */ static int zr36016_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36016 *ptr = (struct zr36016 *) codec->data; dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", ptr->name, norm->HStart, norm->VStart, cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y for now ... */ ptr->width = cap->width; ptr->height = cap->height; /* (Ronald) This is ugly. zoran_device.c, line 387 * already mentions what happens if HStart is even * (blue faces, etc., cr/cb inversed). There's probably * some good reason why HStart is 0 instead of 1, so I'm * leaving it to this for now, but really... This can be * done a lot simpler */ ptr->xoff = (norm->HStart ? norm->HStart : 1) + cap->x; /* Something to note here (I don't understand it), setting * VStart too high will cause the codec to 'not work'. I * really don't get it. values of 16 (VStart) already break * it here. Just '0' seems to work. More testing needed! */ ptr->yoff = norm->VStart + cap->y; /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */ ptr->xdec = ((cap->decimation & 0xff) == 1) ? 
0 : 1; ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1; return 0; } /* additional control functions */ static int zr36016_control (struct videocodec *codec, int type, int size, void *data) { struct zr36016 *ptr = (struct zr36016 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status - we don't know it ... */ if (size != sizeof(int)) return -EFAULT; *ival = 0; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = 0; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != 0) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36016_unset (struct videocodec *codec) { struct zr36016 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36016_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) 
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36016_setup (struct videocodec *codec) { struct zr36016 *ptr; int res; dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs); if (zr36016_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36016: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]", zr36016_codecs); ptr->num = zr36016_codecs++; ptr->codec = codec; //testing res = zr36016_basic_test(ptr); if (res < 0) { zr36016_unset(codec); return res; } //final setup ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 768; ptr->height = 288; ptr->xdec = 1; ptr->ydec = 0; zr36016_init(ptr); dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", ptr->name, ptr->version); return 0; } static const struct videocodec zr36016_codec = { .owner = THIS_MODULE, .name = "zr36016", .magic = 0L, // magic not used .flags = CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER, .type = CODEC_TYPE_ZR36016, .setup = zr36016_setup, // functionality .unset = zr36016_unset, .set_mode = zr36016_set_mode, .set_video = zr36016_set_video, .control = zr36016_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36016_init_module (void) { //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION); zr36016_codecs = 0; return videocodec_register(&zr36016_codec); } static void __exit zr36016_cleanup_module (void) { if (zr36016_codecs) { dprintk(1, "zr36016: something's wrong - %d codecs left somehow.\n", zr36016_codecs); } 
videocodec_unregister(&zr36016_codec); } module_init(zr36016_init_module); module_exit(zr36016_cleanup_module); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Driver module for ZR36016 video frontends " ZR016_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
JackpotClavin/android_kernel_samsung_venturi
net/ipx/ipx_route.c
11768
6962
/* * Implements the IPX routing routines. * Code moved from af_ipx.c. * * Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2003 * * See net/ipx/ChangeLog. */ #include <linux/list.h> #include <linux/route.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/ipx.h> #include <net/sock.h> LIST_HEAD(ipx_routes); DEFINE_RWLOCK(ipx_routes_lock); extern struct ipx_interface *ipx_internal_net; extern __be16 ipx_cksum(struct ipxhdr *packet, int length); extern struct ipx_interface *ipxitf_find_using_net(__be32 net); extern int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy); extern int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy); extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node); extern struct ipx_interface *ipxitf_find_using_net(__be32 net); struct ipx_route *ipxrtr_lookup(__be32 net) { struct ipx_route *r; read_lock_bh(&ipx_routes_lock); list_for_each_entry(r, &ipx_routes, node) if (r->ir_net == net) { ipxrtr_hold(r); goto unlock; } r = NULL; unlock: read_unlock_bh(&ipx_routes_lock); return r; } /* * Caller must hold a reference to intrfc */ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node) { struct ipx_route *rt; int rc; /* Get a route structure; either existing or create */ rt = ipxrtr_lookup(network); if (!rt) { rt = kmalloc(sizeof(*rt), GFP_ATOMIC); rc = -EAGAIN; if (!rt) goto out; atomic_set(&rt->refcnt, 1); ipxrtr_hold(rt); write_lock_bh(&ipx_routes_lock); list_add(&rt->node, &ipx_routes); write_unlock_bh(&ipx_routes_lock); } else { rc = -EEXIST; if (intrfc == ipx_internal_net) goto out_put; } rt->ir_net = network; rt->ir_intrfc = intrfc; if (!node) { memset(rt->ir_router_node, '\0', IPX_NODE_LEN); rt->ir_routed = 0; } else { memcpy(rt->ir_router_node, node, IPX_NODE_LEN); rt->ir_routed = 1; } rc = 0; out_put: ipxrtr_put(rt); out: return rc; } void ipxrtr_del_routes(struct ipx_interface *intrfc) { struct 
ipx_route *r, *tmp; write_lock_bh(&ipx_routes_lock); list_for_each_entry_safe(r, tmp, &ipx_routes, node) if (r->ir_intrfc == intrfc) { list_del(&r->node); ipxrtr_put(r); } write_unlock_bh(&ipx_routes_lock); } static int ipxrtr_create(struct ipx_route_definition *rd) { struct ipx_interface *intrfc; int rc = -ENETUNREACH; /* Find the appropriate interface */ intrfc = ipxitf_find_using_net(rd->ipx_router_network); if (!intrfc) goto out; rc = ipxrtr_add_route(rd->ipx_network, intrfc, rd->ipx_router_node); ipxitf_put(intrfc); out: return rc; } static int ipxrtr_delete(__be32 net) { struct ipx_route *r, *tmp; int rc; write_lock_bh(&ipx_routes_lock); list_for_each_entry_safe(r, tmp, &ipx_routes, node) if (r->ir_net == net) { /* Directly connected; can't lose route */ rc = -EPERM; if (!r->ir_routed) goto out; list_del(&r->node); ipxrtr_put(r); rc = 0; goto out; } rc = -ENOENT; out: write_unlock_bh(&ipx_routes_lock); return rc; } /* * The skb has to be unshared, we'll end up calling ipxitf_send, that'll * modify the packet */ int ipxrtr_route_skb(struct sk_buff *skb) { struct ipxhdr *ipx = ipx_hdr(skb); struct ipx_route *r = ipxrtr_lookup(IPX_SKB_CB(skb)->ipx_dest_net); if (!r) { /* no known route */ kfree_skb(skb); return 0; } ipxitf_hold(r->ir_intrfc); ipxitf_send(r->ir_intrfc, skb, r->ir_routed ? r->ir_router_node : ipx->ipx_dest.node); ipxitf_put(r->ir_intrfc); ipxrtr_put(r); return 0; } /* * Route an outgoing frame from a socket. 
*/ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, struct iovec *iov, size_t len, int noblock) { struct sk_buff *skb; struct ipx_sock *ipxs = ipx_sk(sk); struct ipx_interface *intrfc; struct ipxhdr *ipx; size_t size; int ipx_offset; struct ipx_route *rt = NULL; int rc; /* Find the appropriate interface on which to send packet */ if (!usipx->sipx_network && ipx_primary_net) { usipx->sipx_network = ipx_primary_net->if_netnum; intrfc = ipx_primary_net; } else { rt = ipxrtr_lookup(usipx->sipx_network); rc = -ENETUNREACH; if (!rt) goto out; intrfc = rt->ir_intrfc; } ipxitf_hold(intrfc); ipx_offset = intrfc->if_ipx_offset; size = sizeof(struct ipxhdr) + len + ipx_offset; skb = sock_alloc_send_skb(sk, size, noblock, &rc); if (!skb) goto out_put; skb_reserve(skb, ipx_offset); skb->sk = sk; /* Fill in IPX header */ skb_reset_network_header(skb); skb_reset_transport_header(skb); skb_put(skb, sizeof(struct ipxhdr)); ipx = ipx_hdr(skb); ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr)); IPX_SKB_CB(skb)->ipx_tctrl = 0; ipx->ipx_type = usipx->sipx_type; IPX_SKB_CB(skb)->last_hop.index = -1; #ifdef CONFIG_IPX_INTERN IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum; memcpy(ipx->ipx_source.node, ipxs->node, IPX_NODE_LEN); #else rc = ntohs(ipxs->port); if (rc == 0x453 || rc == 0x452) { /* RIP/SAP special handling for mars_nwe */ IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum; memcpy(ipx->ipx_source.node, intrfc->if_node, IPX_NODE_LEN); } else { IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum; memcpy(ipx->ipx_source.node, ipxs->intrfc->if_node, IPX_NODE_LEN); } #endif /* CONFIG_IPX_INTERN */ ipx->ipx_source.sock = ipxs->port; IPX_SKB_CB(skb)->ipx_dest_net = usipx->sipx_network; memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN); ipx->ipx_dest.sock = usipx->sipx_port; rc = memcpy_fromiovec(skb_put(skb, len), iov, len); if (rc) { kfree_skb(skb); goto out_put; } /* Apply checksum. Not allowed on 802.3 links. 
*/ if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023)) ipx->ipx_checksum = htons(0xFFFF); else ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr)); rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? rt->ir_router_node : ipx->ipx_dest.node); out_put: ipxitf_put(intrfc); if (rt) ipxrtr_put(rt); out: return rc; } /* * We use a normal struct rtentry for route handling */ int ipxrtr_ioctl(unsigned int cmd, void __user *arg) { struct rtentry rt; /* Use these to behave like 'other' stacks */ struct sockaddr_ipx *sg, *st; int rc = -EFAULT; if (copy_from_user(&rt, arg, sizeof(rt))) goto out; sg = (struct sockaddr_ipx *)&rt.rt_gateway; st = (struct sockaddr_ipx *)&rt.rt_dst; rc = -EINVAL; if (!(rt.rt_flags & RTF_GATEWAY) || /* Direct routes are fixed */ sg->sipx_family != AF_IPX || st->sipx_family != AF_IPX) goto out; switch (cmd) { case SIOCDELRT: rc = ipxrtr_delete(st->sipx_network); break; case SIOCADDRT: { struct ipx_route_definition f; f.ipx_network = st->sipx_network; f.ipx_router_network = sg->sipx_network; memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN); rc = ipxrtr_create(&f); break; } } out: return rc; }
gpl-2.0
drewis/android_kernel_grouper
net/ipx/ipx_route.c
11768
6962
/* * Implements the IPX routing routines. * Code moved from af_ipx.c. * * Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2003 * * See net/ipx/ChangeLog. */ #include <linux/list.h> #include <linux/route.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/ipx.h> #include <net/sock.h> LIST_HEAD(ipx_routes); DEFINE_RWLOCK(ipx_routes_lock); extern struct ipx_interface *ipx_internal_net; extern __be16 ipx_cksum(struct ipxhdr *packet, int length); extern struct ipx_interface *ipxitf_find_using_net(__be32 net); extern int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy); extern int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy); extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node); extern struct ipx_interface *ipxitf_find_using_net(__be32 net); struct ipx_route *ipxrtr_lookup(__be32 net) { struct ipx_route *r; read_lock_bh(&ipx_routes_lock); list_for_each_entry(r, &ipx_routes, node) if (r->ir_net == net) { ipxrtr_hold(r); goto unlock; } r = NULL; unlock: read_unlock_bh(&ipx_routes_lock); return r; } /* * Caller must hold a reference to intrfc */ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node) { struct ipx_route *rt; int rc; /* Get a route structure; either existing or create */ rt = ipxrtr_lookup(network); if (!rt) { rt = kmalloc(sizeof(*rt), GFP_ATOMIC); rc = -EAGAIN; if (!rt) goto out; atomic_set(&rt->refcnt, 1); ipxrtr_hold(rt); write_lock_bh(&ipx_routes_lock); list_add(&rt->node, &ipx_routes); write_unlock_bh(&ipx_routes_lock); } else { rc = -EEXIST; if (intrfc == ipx_internal_net) goto out_put; } rt->ir_net = network; rt->ir_intrfc = intrfc; if (!node) { memset(rt->ir_router_node, '\0', IPX_NODE_LEN); rt->ir_routed = 0; } else { memcpy(rt->ir_router_node, node, IPX_NODE_LEN); rt->ir_routed = 1; } rc = 0; out_put: ipxrtr_put(rt); out: return rc; } void ipxrtr_del_routes(struct ipx_interface *intrfc) { struct 
ipx_route *r, *tmp; write_lock_bh(&ipx_routes_lock); list_for_each_entry_safe(r, tmp, &ipx_routes, node) if (r->ir_intrfc == intrfc) { list_del(&r->node); ipxrtr_put(r); } write_unlock_bh(&ipx_routes_lock); } static int ipxrtr_create(struct ipx_route_definition *rd) { struct ipx_interface *intrfc; int rc = -ENETUNREACH; /* Find the appropriate interface */ intrfc = ipxitf_find_using_net(rd->ipx_router_network); if (!intrfc) goto out; rc = ipxrtr_add_route(rd->ipx_network, intrfc, rd->ipx_router_node); ipxitf_put(intrfc); out: return rc; } static int ipxrtr_delete(__be32 net) { struct ipx_route *r, *tmp; int rc; write_lock_bh(&ipx_routes_lock); list_for_each_entry_safe(r, tmp, &ipx_routes, node) if (r->ir_net == net) { /* Directly connected; can't lose route */ rc = -EPERM; if (!r->ir_routed) goto out; list_del(&r->node); ipxrtr_put(r); rc = 0; goto out; } rc = -ENOENT; out: write_unlock_bh(&ipx_routes_lock); return rc; } /* * The skb has to be unshared, we'll end up calling ipxitf_send, that'll * modify the packet */ int ipxrtr_route_skb(struct sk_buff *skb) { struct ipxhdr *ipx = ipx_hdr(skb); struct ipx_route *r = ipxrtr_lookup(IPX_SKB_CB(skb)->ipx_dest_net); if (!r) { /* no known route */ kfree_skb(skb); return 0; } ipxitf_hold(r->ir_intrfc); ipxitf_send(r->ir_intrfc, skb, r->ir_routed ? r->ir_router_node : ipx->ipx_dest.node); ipxitf_put(r->ir_intrfc); ipxrtr_put(r); return 0; } /* * Route an outgoing frame from a socket. 
*/ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, struct iovec *iov, size_t len, int noblock) { struct sk_buff *skb; struct ipx_sock *ipxs = ipx_sk(sk); struct ipx_interface *intrfc; struct ipxhdr *ipx; size_t size; int ipx_offset; struct ipx_route *rt = NULL; int rc; /* Find the appropriate interface on which to send packet */ if (!usipx->sipx_network && ipx_primary_net) { usipx->sipx_network = ipx_primary_net->if_netnum; intrfc = ipx_primary_net; } else { rt = ipxrtr_lookup(usipx->sipx_network); rc = -ENETUNREACH; if (!rt) goto out; intrfc = rt->ir_intrfc; } ipxitf_hold(intrfc); ipx_offset = intrfc->if_ipx_offset; size = sizeof(struct ipxhdr) + len + ipx_offset; skb = sock_alloc_send_skb(sk, size, noblock, &rc); if (!skb) goto out_put; skb_reserve(skb, ipx_offset); skb->sk = sk; /* Fill in IPX header */ skb_reset_network_header(skb); skb_reset_transport_header(skb); skb_put(skb, sizeof(struct ipxhdr)); ipx = ipx_hdr(skb); ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr)); IPX_SKB_CB(skb)->ipx_tctrl = 0; ipx->ipx_type = usipx->sipx_type; IPX_SKB_CB(skb)->last_hop.index = -1; #ifdef CONFIG_IPX_INTERN IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum; memcpy(ipx->ipx_source.node, ipxs->node, IPX_NODE_LEN); #else rc = ntohs(ipxs->port); if (rc == 0x453 || rc == 0x452) { /* RIP/SAP special handling for mars_nwe */ IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum; memcpy(ipx->ipx_source.node, intrfc->if_node, IPX_NODE_LEN); } else { IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum; memcpy(ipx->ipx_source.node, ipxs->intrfc->if_node, IPX_NODE_LEN); } #endif /* CONFIG_IPX_INTERN */ ipx->ipx_source.sock = ipxs->port; IPX_SKB_CB(skb)->ipx_dest_net = usipx->sipx_network; memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN); ipx->ipx_dest.sock = usipx->sipx_port; rc = memcpy_fromiovec(skb_put(skb, len), iov, len); if (rc) { kfree_skb(skb); goto out_put; } /* Apply checksum. Not allowed on 802.3 links. 
*/ if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023)) ipx->ipx_checksum = htons(0xFFFF); else ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr)); rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? rt->ir_router_node : ipx->ipx_dest.node); out_put: ipxitf_put(intrfc); if (rt) ipxrtr_put(rt); out: return rc; } /* * We use a normal struct rtentry for route handling */ int ipxrtr_ioctl(unsigned int cmd, void __user *arg) { struct rtentry rt; /* Use these to behave like 'other' stacks */ struct sockaddr_ipx *sg, *st; int rc = -EFAULT; if (copy_from_user(&rt, arg, sizeof(rt))) goto out; sg = (struct sockaddr_ipx *)&rt.rt_gateway; st = (struct sockaddr_ipx *)&rt.rt_dst; rc = -EINVAL; if (!(rt.rt_flags & RTF_GATEWAY) || /* Direct routes are fixed */ sg->sipx_family != AF_IPX || st->sipx_family != AF_IPX) goto out; switch (cmd) { case SIOCDELRT: rc = ipxrtr_delete(st->sipx_network); break; case SIOCADDRT: { struct ipx_route_definition f; f.ipx_network = st->sipx_network; f.ipx_router_network = sg->sipx_network; memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN); rc = ipxrtr_create(&f); break; } } out: return rc; }
gpl-2.0
nikro56/android_kernel_acer_t30s
sound/pci/echoaudio/echoaudio_gml.c
13304
5950
/****************************************************************************

   Copyright Echo Digital Audio Corporation (c) 1998 - 2004
   All rights reserved
   www.echoaudio.com

   This file is part of Echo Digital Audio's generic driver library.

   Echo Digital Audio's generic driver library is free software;
   you can redistribute it and/or modify it under the terms of
   the GNU General Public License as published by the Free Software
   Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston,
   MA  02111-1307, USA.

   *************************************************************************

 Translation from C++ and adaptation for use in ALSA-Driver
 were made by Giuliano Pochini <pochini@shiny.it>

****************************************************************************/


/* These functions are common for Gina24, Layla24 and Mona cards */


/* ASIC status check - some cards have one or two ASICs that need to be
loaded.  Once that load is complete, this function is called to see if
the load was successful.
If this load fails, it does not necessarily mean that the hardware is
defective - the external box may be disconnected or turned off. */
static int check_asic_status(struct echoaudio *chip)
{
	u32 asic_status;

	send_vector(chip, DSP_VC_TEST_ASIC);

	/* The DSP will return a value to indicate whether or not the
	   ASIC is currently loaded */
	if (read_dsp(chip, &asic_status) < 0) {
		DE_INIT(("check_asic_status: failed on read_dsp\n"));
		chip->asic_loaded = FALSE;
		return -EIO;
	}

	/* Cache the result so other paths can test chip->asic_loaded. */
	chip->asic_loaded = (asic_status == ASIC_ALREADY_LOADED);
	return chip->asic_loaded ? 0 : -EIO;
}



/* Most configuration of Gina24, Layla24, or Mona is accomplished by writing
the control register.  write_control_reg sends the new control register
value to the DSP.
@value: new control register contents (CPU byte order on entry)
@force: write even if the value equals the last one sent
Returns 0 on success or -EIO if the DSP handshake times out. */
static int write_control_reg(struct echoaudio *chip, u32 value, char force)
{
	/* Handle the digital input auto-mute */
	if (chip->digital_in_automute)
		value |= GML_DIGITAL_IN_AUTO_MUTE;
	else
		value &= ~GML_DIGITAL_IN_AUTO_MUTE;

	DE_ACT(("write_control_reg: 0x%x\n", value));

	/* Write the control register.  The comm page copy is kept in
	   little-endian, so convert before comparing/storing. */
	value = cpu_to_le32(value);
	if (value != chip->comm_page->control_register || force) {
		if (wait_handshake(chip))
			return -EIO;
		chip->comm_page->control_register = value;
		clear_handshake(chip);
		return send_vector(chip, DSP_VC_WRITE_CONTROL_REG);
	}
	return 0;
}



/* Gina24, Layla24, and Mona support digital input auto-mute.  If the digital
input auto-mute is enabled, the DSP will only enable the digital inputs if
the card is syncing to a valid clock on the ADAT or S/PDIF inputs.
If the auto-mute is disabled, the digital inputs are enabled regardless of
what the input clock is set or what is connected. */
static int set_input_auto_mute(struct echoaudio *chip, int automute)
{
	DE_ACT(("set_input_auto_mute %d\n", automute));

	chip->digital_in_automute = automute;

	/* Re-set the input clock to the current value - indirectly causes
	   the auto-mute flag to be sent to the DSP */
	return set_input_clock(chip, chip->input_clock);
}



/* S/PDIF coax / S/PDIF optical / ADAT - switch
Changes the digital I/O mode; all pipes must be closed first.  When the
mode changes from or to ADAT the channel count changes, so every gain
level is re-sent to the DSP afterwards. */
static int set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u8 previous_mode;
	int err, i, o;

	if (chip->bad_board)
		return -EIO;

	/* All audio channels must be closed before changing the digital mode */
	if (snd_BUG_ON(chip->pipe_alloc_mask))
		return -EAGAIN;

	if (snd_BUG_ON(!(chip->digital_modes & (1 << mode))))
		return -EINVAL;

	previous_mode = chip->digital_mode;
	err = dsp_set_digital_mode(chip, mode);

	/* If we successfully changed the digital mode from or to ADAT,
	   then make sure all output, input and monitor levels are
	   updated by the DSP comm object. */
	if (err >= 0 && previous_mode != mode &&
	    (previous_mode == DIGITAL_MODE_ADAT || mode == DIGITAL_MODE_ADAT)) {
		spin_lock_irq(&chip->lock);
		for (o = 0; o < num_busses_out(chip); o++)
			for (i = 0; i < num_busses_in(chip); i++)
				set_monitor_gain(chip, o, i,
						 chip->monitor_gain[o][i]);

#ifdef ECHOCARD_HAS_INPUT_GAIN
		for (i = 0; i < num_busses_in(chip); i++)
			set_input_gain(chip, i, chip->input_gain[i]);
		update_input_line_level(chip);
#endif

		for (o = 0; o < num_busses_out(chip); o++)
			set_output_gain(chip, o, chip->output_gain[o]);
		update_output_line_level(chip);
		spin_unlock_irq(&chip->lock);
	}

	return err;
}



/* Set the S/PDIF output format
@prof: non-zero selects professional mode, zero consumer mode.
The sample-rate status bits differ between the two modes; note that in
consumer mode 44.1 kHz is encoded as both rate bits clear, so it has no
explicit case below. */
static int set_professional_spdif(struct echoaudio *chip, char prof)
{
	u32 control_reg;
	int err;

	/* Clear the current S/PDIF flags */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_SPDIF_FORMAT_CLEAR_MASK;

	/* Set the new S/PDIF flags depending on the mode */
	control_reg |= GML_SPDIF_TWO_CHANNEL | GML_SPDIF_24_BIT |
		GML_SPDIF_COPY_PERMIT;
	if (prof) {
		/* Professional mode */
		control_reg |= GML_SPDIF_PRO_MODE;

		switch (chip->sample_rate) {
		case 32000:
			control_reg |= GML_SPDIF_SAMPLE_RATE0 |
				GML_SPDIF_SAMPLE_RATE1;
			break;
		case 44100:
			control_reg |= GML_SPDIF_SAMPLE_RATE0;
			break;
		case 48000:
			control_reg |= GML_SPDIF_SAMPLE_RATE1;
			break;
		}
	} else {
		/* Consumer mode */
		switch (chip->sample_rate) {
		case 32000:
			control_reg |= GML_SPDIF_SAMPLE_RATE0 |
				GML_SPDIF_SAMPLE_RATE1;
			break;
		case 48000:
			control_reg |= GML_SPDIF_SAMPLE_RATE1;
			break;
		}
	}

	if ((err = write_control_reg(chip, control_reg, FALSE)))
		return err;
	chip->professional_spdif = prof;
	DE_ACT(("set_professional_spdif to %s\n",
		prof ? "Professional" : "Consumer"));
	return 0;
}
gpl-2.0
weizhenwei/fastsocket
kernel/drivers/pcmcia/tcic.c
505
24685
/*======================================================================

    Device driver for Databook TCIC-2 PCMCIA controller

    tcic.c 1.111 2000/02/15 04:13:12

    The contents of this file are subject to the Mozilla Public
    License Version 1.1 (the "License"); you may not use this file
    except in compliance with the License. You may obtain a copy of
    the License at http://www.mozilla.org/MPL/

    Software distributed under the License is distributed on an "AS
    IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
    implied. See the License for the specific language governing
    rights and limitations under the License.

    The initial developer of the original code is David A. Hinds
    <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
    are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.

    Alternatively, the contents of this file may be used under the
    terms of the GNU General Public License version 2 (the "GPL"), in
    which case the provisions of the GPL are applicable instead of the
    above.  If you wish to allow the use of your version of this file
    only under the terms of the GPL and not to allow others to use
    your version of this file under the MPL, indicate your decision
    by deleting the provisions above and replace them with the notice
    and other provisions required by the GPL.  If you do not delete
    the provisions above, a recipient may use your version of this
    file under either the MPL or the GPL.

======================================================================*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/system.h>

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include "tcic.h"

#ifdef CONFIG_PCMCIA_DEBUG
static int pc_debug;

module_param(pc_debug, int, 0644);
static const char version[] =
"tcic.c 1.111 2000/02/15 04:13:12 (David Hinds)";

#define debug(lvl, fmt, arg...) do {				\
	if (pc_debug > (lvl))					\
		printk(KERN_DEBUG "tcic: " fmt , ## arg);	\
} while (0)
#else
#define debug(lvl, fmt, arg...) do { } while (0)
#endif

MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver");
MODULE_LICENSE("Dual MPL/GPL");

/*====================================================================*/

/* Parameters that can be set with 'insmod' */

/* The base port address of the TCIC-2 chip */
static unsigned long tcic_base = TCIC_BASE;

/* Specify a socket number to ignore */
static int ignore = -1;

/* Probe for safe interrupts? */
static int do_scan = 1;

/* Bit map of interrupts to choose from */
static u_int irq_mask = 0xffff;
static int irq_list[16];
static unsigned int irq_list_count;

/* The card status change interrupt -- 0 means autoselect */
static int cs_irq;

/* Poll status interval -- 0 means default to interrupt */
static int poll_interval;

/* Delay for card status double-checking */
static int poll_quick = HZ/20;

/* CCLK external clock time, in nanoseconds.  70 ns = 14.31818 MHz */
static int cycle_time = 70;

module_param(tcic_base, ulong, 0444);
module_param(ignore, int, 0444);
module_param(do_scan, int, 0444);
module_param(irq_mask, int, 0444);
module_param_array(irq_list, int, &irq_list_count, 0444);
module_param(cs_irq, int, 0444);
module_param(poll_interval, int, 0444);
module_param(poll_quick, int, 0444);
module_param(cycle_time, int, 0444);

/*====================================================================*/

static irqreturn_t tcic_interrupt(int irq, void *dev);
static void tcic_timer(u_long data);
static struct pccard_operations tcic_operations;

/* Per-socket driver state; socket.psock is the controller's socket
   number, which can differ from the socket_table[] index when sockets
   are skipped via 'ignore' or is_active(). */
struct tcic_socket {
	u_short	psock;
	u_char	last_sstat;	/* last raw SSTAT, for edge detection */
	u_char	id;		/* controller revision id */
	struct pcmcia_socket	socket;
};

static struct timer_list poll_timer;
static int tcic_timer_pending;

static int sockets;
static struct tcic_socket socket_table[2];

/*====================================================================*/

/* Trick when selecting interrupts: the TCIC sktirq pin is supposed
   to map to irq 11, but is coded as 0 or 1 in the irq registers. */
#define TCIC_IRQ(x) ((x) ? (((x) == 11) ? 1 : (x)) : 15)

#ifdef DEBUG_X
static u_char tcic_getb(u_char reg)
{
	u_char val = inb(tcic_base+reg);
	printk(KERN_DEBUG "tcic_getb(%#lx) = %#x\n", tcic_base+reg, val);
	return val;
}

static u_short tcic_getw(u_char reg)
{
	u_short val = inw(tcic_base+reg);
	printk(KERN_DEBUG "tcic_getw(%#lx) = %#x\n", tcic_base+reg, val);
	return val;
}

static void tcic_setb(u_char reg, u_char data)
{
	printk(KERN_DEBUG "tcic_setb(%#lx, %#x)\n", tcic_base+reg, data);
	outb(data, tcic_base+reg);
}

static void tcic_setw(u_char reg, u_short data)
{
	printk(KERN_DEBUG "tcic_setw(%#lx, %#x)\n", tcic_base+reg, data);
	outw(data, tcic_base+reg);
}
#else
#define tcic_getb(reg) inb(tcic_base+reg)
#define tcic_getw(reg) inw(tcic_base+reg)
#define tcic_setb(reg, data) outb(data, tcic_base+reg)
#define tcic_setw(reg, data) outw(data, tcic_base+reg)
#endif

/* 32-bit register write, issued as two 16-bit port writes (low first). */
static void tcic_setl(u_char reg, u_int data)
{
#ifdef DEBUG_X
	/* NOTE(review): %#x/%#lx here don't match tcic_base (ulong) and
	   data (u_int) exactly — debug-only path, verify before enabling. */
	printk(KERN_DEBUG "tcic_setl(%#x, %#lx)\n", tcic_base+reg, data);
#endif
	outw(data & 0xffff, tcic_base+reg);
	outw(data >> 16, tcic_base+reg+2);
}

/* Indirect "auxiliary" register access: select the aux register via the
   MODE register's program bits, then read/write through TCIC_AUX. */
static void tcic_aux_setb(u_short reg, u_char data)
{
	u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg;
	tcic_setb(TCIC_MODE, mode);
	tcic_setb(TCIC_AUX, data);
}

static u_short tcic_aux_getw(u_short reg)
{
	u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg;
	tcic_setb(TCIC_MODE, mode);
	return tcic_getw(TCIC_AUX);
}

static void tcic_aux_setw(u_short reg, u_short data)
{
	u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg;
	tcic_setb(TCIC_MODE, mode);
	tcic_setw(TCIC_AUX, data);
}

/*====================================================================*/

/* Time conversion functions */

/* Convert nanoseconds to controller wait-state cycles (CCLK based). */
static int to_cycles(int ns)
{
	if (ns < 14)
		return 0;
	else
		return 2*(ns-14)/cycle_time;
}

/*====================================================================*/

static volatile u_int irq_hits;

/* Counting ISR used only during the interrupt probe below. */
static irqreturn_t __init tcic_irq_count(int irq, void *dev)
{
	irq_hits++;
	return IRQ_HANDLED;
}

/* Probe whether @irq is usable: it must be quiet when idle, and fire
   exactly once when the controller generates a test interrupt.
   Returns 0 if the irq looks good, non-zero otherwise (note the
   u_int return carrying -1 — callers only compare against 0). */
static u_int __init try_irq(int irq)
{
	u_short cfg;

	irq_hits = 0;
	if (request_irq(irq, tcic_irq_count, 0, "irq scan",
			tcic_irq_count) != 0)
		return -1;
	mdelay(10);
	if (irq_hits) {
		/* Spurious activity: something else uses this line. */
		free_irq(irq, tcic_irq_count);
		return -1;
	}

	/* Generate one interrupt */
	cfg = TCIC_SYSCFG_AUTOBUSY | 0x0a00;
	tcic_aux_setw(TCIC_AUX_SYSCFG, cfg | TCIC_IRQ(irq));
	tcic_setb(TCIC_IENA, TCIC_IENA_ERR | TCIC_IENA_CFG_HIGH);
	tcic_setb(TCIC_ICSR, TCIC_ICSR_ERR | TCIC_ICSR_JAM);

	udelay(1000);
	free_irq(irq, tcic_irq_count);

	/* Turn off interrupts */
	tcic_setb(TCIC_IENA, TCIC_IENA_CFG_OFF);
	while (tcic_getb(TCIC_ICSR))
		tcic_setb(TCIC_ICSR, TCIC_ICSR_JAM);
	tcic_aux_setw(TCIC_AUX_SYSCFG, cfg);

	return (irq_hits != 1);
}

/* Build the usable-irq bitmask from @mask0: active probe via try_irq()
   when do_scan is set, otherwise just collect currently-free irqs.
   Also prints the resulting irq list (continuing init_tcic's line). */
static u_int __init irq_scan(u_int mask0)
{
	u_int mask1;
	int i;

#ifdef __alpha__
#define PIC 0x4d0
	/* Don't probe level-triggered interrupts -- reserved for PCI */
	int level_mask = inb_p(PIC) | (inb_p(PIC+1) << 8);
	if (level_mask)
		mask0 &= ~level_mask;
#endif

	mask1 = 0;
	if (do_scan) {
		for (i = 0; i < 16; i++)
			if ((mask0 & (1 << i)) && (try_irq(i) == 0))
				mask1 |= (1 << i);
		/* Second pass weeds out irqs that only looked good once. */
		for (i = 0; i < 16; i++)
			if ((mask1 & (1 << i)) && (try_irq(i) != 0)) {
				mask1 ^= (1 << i);
			}
	}

	if (mask1) {
		printk("scanned");
	} else {
		/* Fallback: just find interrupts that aren't in use */
		for (i = 0; i < 16; i++)
			if ((mask0 & (1 << i)) &&
			    (request_irq(i, tcic_irq_count, 0, "x",
					 tcic_irq_count) == 0)) {
				mask1 |= (1 << i);
				free_irq(i, tcic_irq_count);
			}
		printk("default");
	}

	printk(") = ");
	for (i = 0; i < 16; i++)
		if (mask1 & (1<<i))
			printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i);
	printk(" ");

	return mask1;
}

/*======================================================================

    See if a card is present, powered up, in IO mode, and already
    bound to a (non-PCMCIA) Linux driver.  We make an exception for
    cards that look like serial devices.

======================================================================*/

static int __init is_active(int s)
{
	u_short scf1, ioctl, base, num;
	u_char pwr, sstat;
	u_int addr;

	tcic_setl(TCIC_ADDR, (s << TCIC_ADDR_SS_SHFT)
		  | TCIC_ADDR_INDREG | TCIC_SCF1(s));
	scf1 = tcic_getw(TCIC_DATA);
	pwr = tcic_getb(TCIC_PWR);
	sstat = tcic_getb(TCIC_SSTAT);
	addr = TCIC_IWIN(s, 0);
	tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X);
	base = tcic_getw(TCIC_DATA);
	tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X);
	ioctl = tcic_getw(TCIC_DATA);

	/* Recover the io window base/size from the packed register
	   encoding (base has the half-size bit or'ed in). */
	if (ioctl & TCIC_ICTL_TINY)
		num = 1;
	else {
		num = (base ^ (base-1));
		base = base & (base-1);
	}

	if ((sstat & TCIC_SSTAT_CD) && (pwr & TCIC_PWR_VCC(s)) &&
	    (scf1 & TCIC_SCF1_IOSTS) && (ioctl & TCIC_ICTL_ENA) &&
	    ((base & 0xfeef) != 0x02e8)) {	/* 0x2e8/0x3e8: serial-like */
		struct resource *res = request_region(base, num, "tcic-2");
		if (!res) /* region is busy */
			return 1;
		release_region(base, num);
	}

	return 0;
}

/*======================================================================

    This returns the revision code for the specified socket.

======================================================================*/ static int __init get_tcic_id(void) { u_short id; tcic_aux_setw(TCIC_AUX_TEST, TCIC_TEST_DIAG); id = tcic_aux_getw(TCIC_AUX_ILOCK); id = (id & TCIC_ILOCKTEST_ID_MASK) >> TCIC_ILOCKTEST_ID_SH; tcic_aux_setw(TCIC_AUX_TEST, 0); return id; } static int tcic_drv_pcmcia_suspend(struct platform_device *dev, pm_message_t state) { return pcmcia_socket_dev_suspend(&dev->dev); } static int tcic_drv_pcmcia_resume(struct platform_device *dev) { return pcmcia_socket_dev_resume(&dev->dev); } /*====================================================================*/ static struct platform_driver tcic_driver = { .driver = { .name = "tcic-pcmcia", .owner = THIS_MODULE, }, .suspend = tcic_drv_pcmcia_suspend, .resume = tcic_drv_pcmcia_resume, }; static struct platform_device tcic_device = { .name = "tcic-pcmcia", .id = 0, }; static int __init init_tcic(void) { int i, sock, ret = 0; u_int mask, scan; if (platform_driver_register(&tcic_driver)) return -1; printk(KERN_INFO "Databook TCIC-2 PCMCIA probe: "); sock = 0; if (!request_region(tcic_base, 16, "tcic-2")) { printk("could not allocate ports,\n "); platform_driver_unregister(&tcic_driver); return -ENODEV; } else { tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } if (sock == 0) { /* See if resetting the controller does any good */ tcic_setb(TCIC_SCTRL, TCIC_SCTRL_RESET); tcic_setb(TCIC_SCTRL, 0); tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } } } if (sock == 0) { printk("not found.\n"); release_region(tcic_base, 16); platform_driver_unregister(&tcic_driver); return -ENODEV; } sockets = 0; for (i = 0; i < sock; i++) { if ((i == ignore) || is_active(i)) continue; socket_table[sockets].psock = i; socket_table[sockets].id = get_tcic_id(); socket_table[sockets].socket.owner = THIS_MODULE; /* only 
16-bit cards, memory windows must be size-aligned */ /* No PCI or CardBus support */ socket_table[sockets].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN; /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ socket_table[sockets].socket.irq_mask = 0x4cf8; /* 4K minimum window size */ socket_table[sockets].socket.map_size = 0x1000; sockets++; } switch (socket_table[0].id) { case TCIC_ID_DB86082: printk("DB86082"); break; case TCIC_ID_DB86082A: printk("DB86082A"); break; case TCIC_ID_DB86084: printk("DB86084"); break; case TCIC_ID_DB86084A: printk("DB86084A"); break; case TCIC_ID_DB86072: printk("DB86072"); break; case TCIC_ID_DB86184: printk("DB86184"); break; case TCIC_ID_DB86082B: printk("DB86082B"); break; default: printk("Unknown ID 0x%02x", socket_table[0].id); } /* Set up polling */ poll_timer.function = &tcic_timer; poll_timer.data = 0; init_timer(&poll_timer); /* Build interrupt mask */ printk(KERN_CONT ", %d sockets\n", sockets); printk(KERN_INFO " irq list ("); if (irq_list_count == 0) mask = irq_mask; else for (i = mask = 0; i < irq_list_count; i++) mask |= (1<<irq_list[i]); /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ mask &= 0x4cf8; /* Scan interrupts */ mask = irq_scan(mask); for (i=0;i<sockets;i++) socket_table[i].socket.irq_mask = mask; /* Check for only two interrupts available */ scan = (mask & (mask-1)); if (((scan & (scan-1)) == 0) && (poll_interval == 0)) poll_interval = HZ; if (poll_interval == 0) { /* Avoid irq 12 unless it is explicitly requested */ u_int cs_mask = mask & ((cs_irq) ? 
(1<<cs_irq) : ~(1<<12)); for (i = 15; i > 0; i--) if ((cs_mask & (1 << i)) && (request_irq(i, tcic_interrupt, 0, "tcic", tcic_interrupt) == 0)) break; cs_irq = i; if (cs_irq == 0) poll_interval = HZ; } if (socket_table[0].socket.irq_mask & (1 << 11)) printk("sktirq is irq 11, "); if (cs_irq != 0) printk("status change on irq %d\n", cs_irq); else printk("polled status, interval = %d ms\n", poll_interval * 1000 / HZ); for (i = 0; i < sockets; i++) { tcic_setw(TCIC_ADDR+2, socket_table[i].psock << TCIC_SS_SHFT); socket_table[i].last_sstat = tcic_getb(TCIC_SSTAT); } /* jump start interrupt handler, if needed */ tcic_interrupt(0, NULL); platform_device_register(&tcic_device); for (i = 0; i < sockets; i++) { socket_table[i].socket.ops = &tcic_operations; socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; socket_table[i].socket.dev.parent = &tcic_device.dev; ret = pcmcia_register_socket(&socket_table[i].socket); if (ret && i) pcmcia_unregister_socket(&socket_table[0].socket); } return ret; return 0; } /* init_tcic */ /*====================================================================*/ static void __exit exit_tcic(void) { int i; del_timer_sync(&poll_timer); if (cs_irq != 0) { tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); free_irq(cs_irq, tcic_interrupt); } release_region(tcic_base, 16); for (i = 0; i < sockets; i++) { pcmcia_unregister_socket(&socket_table[i].socket); } platform_device_unregister(&tcic_device); platform_driver_unregister(&tcic_driver); } /* exit_tcic */ /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev) { int i, quick = 0; u_char latch, sstat; u_short psock; u_int events; static volatile int active = 0; if (active) { printk(KERN_NOTICE "tcic: reentered interrupt handler!\n"); return IRQ_NONE; } else active = 1; debug(2, "tcic_interrupt()\n"); for (i = 0; i < sockets; i++) { psock = socket_table[i].psock; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) 
| TCIC_ADDR_INDREG | TCIC_SCF1(psock)); sstat = tcic_getb(TCIC_SSTAT); latch = sstat ^ socket_table[psock].last_sstat; socket_table[i].last_sstat = sstat; if (tcic_getb(TCIC_ICSR) & TCIC_ICSR_CDCHG) { tcic_setb(TCIC_ICSR, TCIC_ICSR_CLEAR); quick = 1; } if (latch == 0) continue; events = (latch & TCIC_SSTAT_CD) ? SS_DETECT : 0; events |= (latch & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { events |= (latch & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { events |= (latch & TCIC_SSTAT_RDY) ? SS_READY : 0; events |= (latch & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; events |= (latch & TCIC_SSTAT_LBAT2) ? SS_BATWARN : 0; } if (events) { pcmcia_parse_events(&socket_table[i].socket, events); } } /* Schedule next poll, if needed */ if (((cs_irq == 0) || quick) && (!tcic_timer_pending)) { poll_timer.expires = jiffies + (quick ? poll_quick : poll_interval); add_timer(&poll_timer); tcic_timer_pending = 1; } active = 0; debug(2, "interrupt done\n"); return IRQ_HANDLED; } /* tcic_interrupt */ static void tcic_timer(u_long data) { debug(2, "tcic_timer()\n"); tcic_timer_pending = 0; tcic_interrupt(0, NULL); } /* tcic_timer */ /*====================================================================*/ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) | TCIC_ADDR_INDREG | TCIC_SCF1(psock)); reg = tcic_getb(TCIC_SSTAT); *value = (reg & TCIC_SSTAT_CD) ? SS_DETECT : 0; *value |= (reg & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { *value |= (reg & TCIC_SSTAT_RDY) ? SS_READY : 0; *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; *value |= (reg & TCIC_SSTAT_LBAT2) ? 
SS_BATWARN : 0; } reg = tcic_getb(TCIC_PWR); if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) *value |= SS_POWERON; debug(1, "GetStatus(%d) = %#2.2x\n", psock, *value); return 0; } /* tcic_get_status */ /*====================================================================*/ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; u_short scf1, scf2; debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); reg = tcic_getb(TCIC_PWR); reg &= ~(TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock)); if (state->Vcc == 50) { switch (state->Vpp) { case 0: reg |= TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock); break; case 50: reg |= TCIC_PWR_VCC(psock); break; case 120: reg |= TCIC_PWR_VPP(psock); break; default: return -EINVAL; } } else if (state->Vcc != 0) return -EINVAL; if (reg != tcic_getb(TCIC_PWR)) tcic_setb(TCIC_PWR, reg); reg = TCIC_ILOCK_HOLD_CCLK | TCIC_ILOCK_CWAIT; if (state->flags & SS_OUTPUT_ENA) { tcic_setb(TCIC_SCTRL, TCIC_SCTRL_ENA); reg |= TCIC_ILOCK_CRESENA; } else tcic_setb(TCIC_SCTRL, 0); if (state->flags & SS_RESET) reg |= TCIC_ILOCK_CRESET; tcic_aux_setb(TCIC_AUX_ILOCK, reg); tcic_setw(TCIC_ADDR, TCIC_SCF1(psock)); scf1 = TCIC_SCF1_FINPACK; scf1 |= TCIC_IRQ(state->io_irq); if (state->flags & SS_IOCARD) { scf1 |= TCIC_SCF1_IOSTS; if (state->flags & SS_SPKR_ENA) scf1 |= TCIC_SCF1_SPKR; if (state->flags & SS_DMA_MODE) scf1 |= TCIC_SCF1_DREQ2 << TCIC_SCF1_DMA_SHIFT; } tcic_setw(TCIC_DATA, scf1); /* Some general setup stuff, and configure status interrupt */ reg = TCIC_WAIT_ASYNC | TCIC_WAIT_SENSE | to_cycles(250); tcic_aux_setb(TCIC_AUX_WCTL, reg); tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00| TCIC_IRQ(cs_irq)); /* Card status change interrupt mask */ tcic_setw(TCIC_ADDR, 
TCIC_SCF2(psock)); scf2 = TCIC_SCF2_MALL; if (state->csc_mask & SS_DETECT) scf2 &= ~TCIC_SCF2_MCD; if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) reg &= ~TCIC_SCF2_MLBAT1; } else { if (state->csc_mask & SS_BATDEAD) reg &= ~TCIC_SCF2_MLBAT1; if (state->csc_mask & SS_BATWARN) reg &= ~TCIC_SCF2_MLBAT2; if (state->csc_mask & SS_READY) reg &= ~TCIC_SCF2_MRDY; } tcic_setw(TCIC_DATA, scf2); /* For the ISA bus, the irq should be active-high totem-pole */ tcic_setb(TCIC_IENA, TCIC_IENA_CDCHG | TCIC_IENA_CFG_HIGH); return 0; } /* tcic_set_socket */ /*====================================================================*/ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_int addr; u_short base, len, ioctl; debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_IWIN(psock, io->map); base = io->start; len = io->stop - io->start; /* Check to see that len+1 is power of two, etc */ if ((len & (len+1)) || (base & len)) return -EINVAL; base |= (len+1)>>1; tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X); tcic_setw(TCIC_DATA, base); ioctl = (psock << TCIC_ICTL_SS_SHFT); ioctl |= (len == 0) ? TCIC_ICTL_TINY : 0; ioctl |= (io->flags & MAP_ACTIVE) ? TCIC_ICTL_ENA : 0; ioctl |= to_cycles(io->speed) & TCIC_ICTL_WSCNT_MASK; if (!(io->flags & MAP_AUTOSZ)) { ioctl |= TCIC_ICTL_QUIET; ioctl |= (io->flags & MAP_16BIT) ? 
TCIC_ICTL_BW_16 : TCIC_ICTL_BW_8; } tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X); tcic_setw(TCIC_DATA, ioctl); return 0; } /* tcic_set_io_map */ /*====================================================================*/ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_short addr, ctl; u_long base, len, mmap; debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->res->start, (unsigned long long)mem->res->end, mem->card_start); if ((mem->map > 3) || (mem->card_start > 0x3ffffff) || (mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) || (mem->res->start > mem->res->end) || (mem->speed > 1000)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_MWIN(psock, mem->map); base = mem->res->start; len = mem->res->end - mem->res->start; if ((len & (len+1)) || (base & len)) return -EINVAL; if (len == 0x0fff) base = (base >> TCIC_MBASE_HA_SHFT) | TCIC_MBASE_4K_BIT; else base = (base | (len+1)>>1) >> TCIC_MBASE_HA_SHFT; tcic_setw(TCIC_ADDR, addr + TCIC_MBASE_X); tcic_setw(TCIC_DATA, base); mmap = mem->card_start - mem->res->start; mmap = (mmap >> TCIC_MMAP_CA_SHFT) & TCIC_MMAP_CA_MASK; if (mem->flags & MAP_ATTRIB) mmap |= TCIC_MMAP_REG; tcic_setw(TCIC_ADDR, addr + TCIC_MMAP_X); tcic_setw(TCIC_DATA, mmap); ctl = TCIC_MCTL_QUIET | (psock << TCIC_MCTL_SS_SHFT); ctl |= to_cycles(mem->speed) & TCIC_MCTL_WSCNT_MASK; ctl |= (mem->flags & MAP_16BIT) ? 0 : TCIC_MCTL_B8; ctl |= (mem->flags & MAP_WRPROT) ? TCIC_MCTL_WP : 0; ctl |= (mem->flags & MAP_ACTIVE) ? 
TCIC_MCTL_ENA : 0; tcic_setw(TCIC_ADDR, addr + TCIC_MCTL_X); tcic_setw(TCIC_DATA, ctl); return 0; } /* tcic_set_mem_map */ /*====================================================================*/ static int tcic_init(struct pcmcia_socket *s) { int i; struct resource res = { .start = 0, .end = 0x1000 }; pccard_io_map io = { 0, 0, 0, 0, 1 }; pccard_mem_map mem = { .res = &res, }; for (i = 0; i < 2; i++) { io.map = i; tcic_set_io_map(s, &io); } for (i = 0; i < 5; i++) { mem.map = i; tcic_set_mem_map(s, &mem); } return 0; } static struct pccard_operations tcic_operations = { .init = tcic_init, .get_status = tcic_get_status, .set_socket = tcic_set_socket, .set_io_map = tcic_set_io_map, .set_mem_map = tcic_set_mem_map, }; /*====================================================================*/ module_init(init_tcic); module_exit(exit_tcic);
gpl-2.0
pjsports/kernel-2.6.32.9-A88
drivers/pcmcia/m8xx_pcmcia.c
505
33003
/*
 * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series.
 *
 * (C) 1999-2000 Magnus Damm <damm@opensource.se>
 * (C) 2001-2002 Montavista Software, Inc.
 *     <mlocke@mvista.com>
 *
 * Support for two slots by Cyclades Corporation
 *     <oliver.kurth@cyclades.de>
 * Further fixes, v2.6 kernel port
 *     <marcelo.tosatti@cyclades.com>
 *
 * Some fixes, additions (C) 2005-2007 Montavista Software, Inc.
 *     <vbordug@ru.mvista.com>
 *
 * "The ExCA standard specifies that socket controllers should provide
 * two IO and five memory windows per socket, which can be independently
 * configured and positioned in the host address space and mapped to
 * arbitrary segments of card address space. " - David A Hinds. 1999
 *
 * This controller does _not_ meet the ExCA standard.
 *
 * m8xx pcmcia controller brief info:
 * + 8 windows (attrib, mem, i/o)
 * + up to two slots (SLOT_A and SLOT_B)
 * + inputpins, outputpins, event and mask registers.
 * - no offset register. sigh.
 *
 * Because of the lacking offset register we must map the whole card.
 * We assign each memory window PCMCIA_MEM_WIN_SIZE address space.
 * Make sure there is (PCMCIA_MEM_WIN_SIZE * PCMCIA_MEM_WIN_NO
 * * PCMCIA_SOCKETS_NO) bytes at PCMCIA_MEM_WIN_BASE.
 * The i/o windows are dynamically allocated at PCMCIA_IO_WIN_BASE.
 * They are maximum 64KByte each...
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/bitops.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/mpc8xx.h>
#include <asm/8xx_immap.h>
#include <asm/irq.h>
#include <asm/fs_pd.h>

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>

#ifdef CONFIG_PCMCIA_DEBUG
static int pc_debug;
module_param(pc_debug, int, 0);
#define dprintk(args...) printk(KERN_DEBUG "m8xx_pcmcia: " args);
#else
#define dprintk(args...)
#endif

#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args)
#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args)

static const char *version = "Version 0.06, Aug 2005";
MODULE_LICENSE("Dual MPL/GPL");

#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)

/* The RPX series use SLOT_B */
#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
#define CONFIG_PCMCIA_SLOT_B
#define CONFIG_BD_IS_MHZ
#endif

/* The ADS board use SLOT_A */
#ifdef CONFIG_ADS
#define CONFIG_PCMCIA_SLOT_A
#define CONFIG_BD_IS_MHZ
#endif

/* The FADS series are a mess */
#ifdef CONFIG_FADS
#if defined(CONFIG_MPC860T) || defined(CONFIG_MPC860) || defined(CONFIG_MPC821)
#define CONFIG_PCMCIA_SLOT_A
#else
#define CONFIG_PCMCIA_SLOT_B
#endif
#endif

#if defined(CONFIG_MPC885ADS)
#define CONFIG_PCMCIA_SLOT_A
#define PCMCIA_GLITCHY_CD
#endif

/* Cyclades ACS uses both slots */
#ifdef CONFIG_PRxK
#define CONFIG_PCMCIA_SLOT_A
#define CONFIG_PCMCIA_SLOT_B
#endif

#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */

#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)

#define PCMCIA_SOCKETS_NO 2
/* We have only 8 windows, dualsocket support will be limited. */
#define PCMCIA_MEM_WIN_NO 2
#define PCMCIA_IO_WIN_NO  2
#define PCMCIA_SLOT_MSG "SLOT_A and SLOT_B"

#elif defined(CONFIG_PCMCIA_SLOT_A) || defined(CONFIG_PCMCIA_SLOT_B)

#define PCMCIA_SOCKETS_NO 1
/* full support for one slot */
#define PCMCIA_MEM_WIN_NO 5
#define PCMCIA_IO_WIN_NO  2

/* define _slot_ to be able to optimize macros */

#ifdef CONFIG_PCMCIA_SLOT_A
#define _slot_ 0
#define PCMCIA_SLOT_MSG "SLOT_A"
#else
#define _slot_ 1
#define PCMCIA_SLOT_MSG "SLOT_B"
#endif

#else
#error m8xx_pcmcia: Bad configuration!
#endif

/* ------------------------------------------------------------------------- */

#define PCMCIA_MEM_WIN_BASE 0xe0000000	/* base address for memory window 0 */
#define PCMCIA_MEM_WIN_SIZE 0x04000000	/* each memory window is 64 MByte */
#define PCMCIA_IO_WIN_BASE  _IO_BASE	/* base address for io window 0 */

/* ------------------------------------------------------------------------- */

/* Status-change interrupt level (set from the device tree at probe). */
static int pcmcia_schlvl;

static DEFINE_SPINLOCK(events_lock);

#define PCMCIA_SOCKET_KEY_5V 1
#define PCMCIA_SOCKET_KEY_LV 2

/* look up table for pgcrx registers */
static u32 *m8xx_pgcrx[2];

/*
 * This structure is used to address each window in the PCMCIA controller.
 *
 * Keep in mind that we assume that pcmcia_win[n+1] is mapped directly
 * after pcmcia_win[n]...
 */

struct pcmcia_win {
	u32 br;
	u32 or;
};

/*
 * For some reason the hardware guys decided to make both slots share
 * some registers.
 *
 * Could someone invent object oriented hardware ?
 *
 * The macros are used to get the right bit from the registers.
 * SLOT_A : slot = 0
 * SLOT_B : slot = 1
 *
 * Each slot's bits occupy one nibble-group; "slot << 4" shifts the
 * SLOT_A bit positions down by 16 to reach SLOT_B's half of the word.
 */
#define M8XX_PCMCIA_VS1(slot)      (0x80000000 >> (slot << 4))
#define M8XX_PCMCIA_VS2(slot)      (0x40000000 >> (slot << 4))
#define M8XX_PCMCIA_VS_MASK(slot)  (0xc0000000 >> (slot << 4))
#define M8XX_PCMCIA_VS_SHIFT(slot) (30 - (slot << 4))

#define M8XX_PCMCIA_WP(slot)       (0x20000000 >> (slot << 4))
#define M8XX_PCMCIA_CD2(slot)      (0x10000000 >> (slot << 4))
#define M8XX_PCMCIA_CD1(slot)      (0x08000000 >> (slot << 4))
#define M8XX_PCMCIA_BVD2(slot)     (0x04000000 >> (slot << 4))
#define M8XX_PCMCIA_BVD1(slot)     (0x02000000 >> (slot << 4))
#define M8XX_PCMCIA_RDY(slot)      (0x01000000 >> (slot << 4))
#define M8XX_PCMCIA_RDY_L(slot)    (0x00800000 >> (slot << 4))
#define M8XX_PCMCIA_RDY_H(slot)    (0x00400000 >> (slot << 4))
#define M8XX_PCMCIA_RDY_R(slot)    (0x00200000 >> (slot << 4))
#define M8XX_PCMCIA_RDY_F(slot)    (0x00100000 >> (slot << 4))
#define M8XX_PCMCIA_MASK(slot)     (0xFFFF0000 >> (slot << 4))

#define M8XX_PCMCIA_POR_VALID    0x00000001
#define M8XX_PCMCIA_POR_WRPROT   0x00000002
#define M8XX_PCMCIA_POR_ATTRMEM  0x00000010
#define M8XX_PCMCIA_POR_IO       0x00000018
#define M8XX_PCMCIA_POR_16BIT    0x00000040

#define M8XX_PGCRX(slot)  m8xx_pgcrx[slot]

#define M8XX_PGCRX_CXOE    0x00000080
#define M8XX_PGCRX_CXRESET 0x00000040

/* we keep one lookup table per socket to check flags */

#define PCMCIA_EVENTS_MAX 5	/* 4 max at a time + termination */

struct event_table {
	u32 regbit;
	u32 eventbit;
};

static const char driver_name[] = "m8xx-pcmcia";

struct socket_info {
	void (*handler) (void *info, u32 events);
	void *info;

	u32 slot;
	pcmconf8xx_t *pcmcia;
	u32 bus_freq;
	int hwirq;

	socket_state_t state;
	struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
	struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
	struct event_table events[PCMCIA_EVENTS_MAX];
	struct pcmcia_socket socket;
};

static struct socket_info socket[PCMCIA_SOCKETS_NO];

/*
 * Search this table to see if the windowsize is
 * supported...
*/ #define M8XX_SIZES_NO 32 static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = { 0x00000001, 0x00000002, 0x00000008, 0x00000004, 0x00000080, 0x00000040, 0x00000010, 0x00000020, 0x00008000, 0x00004000, 0x00001000, 0x00002000, 0x00000100, 0x00000200, 0x00000800, 0x00000400, 0x0fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x01000000, 0x02000000, 0xffffffff, 0x04000000, 0x00010000, 0x00020000, 0x00080000, 0x00040000, 0x00800000, 0x00400000, 0x00100000, 0x00200000 }; /* ------------------------------------------------------------------------- */ static irqreturn_t m8xx_interrupt(int irq, void *dev); #define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ /* ------------------------------------------------------------------------- */ /* board specific stuff: */ /* voltage_set(), hardware_enable() and hardware_disable() */ /* ------------------------------------------------------------------------- */ /* RPX Boards from Embedded Planet */ #if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) /* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks. * SYPCR is write once only, therefore must the slowest memory be faster * than the bus monitor or we will get a machine check due to the bus timeout. 
*/ #define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE" #undef PCMCIA_BMT_LIMIT #define PCMCIA_BMT_LIMIT (6*8) static int voltage_set(int slot, int vcc, int vpp) { u32 reg = 0; switch (vcc) { case 0: break; case 33: reg |= BCSR1_PCVCTL4; break; case 50: reg |= BCSR1_PCVCTL5; break; default: return 1; } switch (vpp) { case 0: break; case 33: case 50: if (vcc == vpp) reg |= BCSR1_PCVCTL6; else return 1; break; case 120: reg |= BCSR1_PCVCTL7; default: return 1; } if (!((vcc == 50) || (vcc == 0))) return 1; /* first, turn off all power */ out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7)); /* enable new powersettings */ out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg); return 0; } #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V #define hardware_enable(_slot_) /* No hardware to enable */ #define hardware_disable(_slot_) /* No hardware to disable */ #endif /* CONFIG_RPXCLASSIC */ /* FADS Boards from Motorola */ #if defined(CONFIG_FADS) #define PCMCIA_BOARD_MSG "FADS" static int voltage_set(int slot, int vcc, int vpp) { u32 reg = 0; switch (vcc) { case 0: break; case 33: reg |= BCSR1_PCCVCC0; break; case 50: reg |= BCSR1_PCCVCC1; break; default: return 1; } switch (vpp) { case 0: break; case 33: case 50: if (vcc == vpp) reg |= BCSR1_PCCVPP1; else return 1; break; case 120: if ((vcc == 33) || (vcc == 50)) reg |= BCSR1_PCCVPP0; else return 1; default: return 1; } /* first, turn off all power */ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK)); /* enable new powersettings */ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg); return 0; } #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V static void hardware_enable(int slot) { out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN); } static void hardware_disable(int slot) { out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN); } #endif /* MPC885ADS Boards */ #if 
defined(CONFIG_MPC885ADS) #define PCMCIA_BOARD_MSG "MPC885ADS" #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V static inline void hardware_enable(int slot) { m8xx_pcmcia_ops.hw_ctrl(slot, 1); } static inline void hardware_disable(int slot) { m8xx_pcmcia_ops.hw_ctrl(slot, 0); } static inline int voltage_set(int slot, int vcc, int vpp) { return m8xx_pcmcia_ops.voltage_set(slot, vcc, vpp); } #endif /* ------------------------------------------------------------------------- */ /* Motorola MBX860 */ #if defined(CONFIG_MBX) #define PCMCIA_BOARD_MSG "MBX" static int voltage_set(int slot, int vcc, int vpp) { u8 reg = 0; switch (vcc) { case 0: break; case 33: reg |= CSR2_VCC_33; break; case 50: reg |= CSR2_VCC_50; break; default: return 1; } switch (vpp) { case 0: break; case 33: case 50: if (vcc == vpp) reg |= CSR2_VPP_VCC; else return 1; break; case 120: if ((vcc == 33) || (vcc == 50)) reg |= CSR2_VPP_12; else return 1; default: return 1; } /* first, turn off all power */ out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); /* enable new powersettings */ out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg); return 0; } #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V #define hardware_enable(_slot_) /* No hardware to enable */ #define hardware_disable(_slot_) /* No hardware to disable */ #endif /* CONFIG_MBX */ #if defined(CONFIG_PRxK) #include <asm/cpld.h> extern volatile fpga_pc_regs *fpga_pc; #define PCMCIA_BOARD_MSG "MPC855T" static int voltage_set(int slot, int vcc, int vpp) { u8 reg = 0; u8 regread; cpld_regs *ccpld = get_cpld(); switch (vcc) { case 0: break; case 33: reg |= PCMCIA_VCC_33; break; case 50: reg |= PCMCIA_VCC_50; break; default: return 1; } switch (vpp) { case 0: break; case 33: case 50: if (vcc == vpp) reg |= PCMCIA_VPP_VCC; else return 1; break; case 120: if ((vcc == 33) || (vcc == 50)) reg |= PCMCIA_VPP_12; else return 1; default: return 1; } reg = reg >> (slot << 2); regread = 
in_8(&ccpld->fpga_pc_ctl); if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) { /* enable new powersettings */ regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)); out_8(&ccpld->fpga_pc_ctl, reg | regread); msleep(100); } return 0; } #define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV #define hardware_enable(_slot_) /* No hardware to enable */ #define hardware_disable(_slot_) /* No hardware to disable */ #endif /* CONFIG_PRxK */ static u32 pending_events[PCMCIA_SOCKETS_NO]; static DEFINE_SPINLOCK(pending_event_lock); static irqreturn_t m8xx_interrupt(int irq, void *dev) { struct socket_info *s; struct event_table *e; unsigned int i, events, pscr, pipr, per; pcmconf8xx_t *pcmcia = socket[0].pcmcia; dprintk("Interrupt!\n"); /* get interrupt sources */ pscr = in_be32(&pcmcia->pcmc_pscr); pipr = in_be32(&pcmcia->pcmc_pipr); per = in_be32(&pcmcia->pcmc_per); for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { s = &socket[i]; e = &s->events[0]; events = 0; while (e->regbit) { if (pscr & e->regbit) events |= e->eventbit; e++; } /* * report only if both card detect signals are the same * not too nice done, * we depend on that CD2 is the bit to the left of CD1... */ if (events & SS_DETECT) if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^ (pipr & M8XX_PCMCIA_CD1(i))) { events &= ~SS_DETECT; } #ifdef PCMCIA_GLITCHY_CD /* * I've experienced CD problems with my ADS board. * We make an extra check to see if there was a * real change of Card detection. 
*/ if ((events & SS_DETECT) && ((pipr & (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) && (s->state.Vcc | s->state.Vpp)) { events &= ~SS_DETECT; /*printk( "CD glitch workaround - CD = 0x%08x!\n", (pipr & (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i)))); */ } #endif /* call the handler */ dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, " "pipr = 0x%08x\n", i, events, pscr, pipr); if (events) { spin_lock(&pending_event_lock); pending_events[i] |= events; spin_unlock(&pending_event_lock); /* * Turn off RDY_L bits in the PER mask on * CD interrupt receival. * * They can generate bad interrupts on the * ACS4,8,16,32. - marcelo */ per &= ~M8XX_PCMCIA_RDY_L(0); per &= ~M8XX_PCMCIA_RDY_L(1); out_be32(&pcmcia->pcmc_per, per); if (events) pcmcia_parse_events(&socket[i].socket, events); } } /* clear the interrupt sources */ out_be32(&pcmcia->pcmc_pscr, pscr); dprintk("Interrupt done.\n"); return IRQ_HANDLED; } static u32 m8xx_get_graycode(u32 size) { u32 k; for (k = 0; k < M8XX_SIZES_NO; k++) if (m8xx_size_to_gray[k] == size) break; if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1)) k = -1; return k; } static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq) { u32 reg, clocks, psst, psl, psht; if (!ns) { /* * We get called with IO maps setup to 0ns * if not specified by the user. * They should be 255ns. */ if (is_io) ns = 255; else ns = 100; /* fast memory if 0 */ } /* * In PSST, PSL, PSHT fields we tell the controller * timing parameters in CLKOUT clock cycles. * CLKOUT is the same as GCLK2_50. 
*/ /* how we want to adjust the timing - in percent */ #define ADJ 180 /* 80 % longer accesstime - to be sure */ clocks = ((bus_freq / 1000) * ns) / 1000; clocks = (clocks * ADJ) / (100 * 1000); if (clocks >= PCMCIA_BMT_LIMIT) { printk("Max access time limit reached\n"); clocks = PCMCIA_BMT_LIMIT - 1; } psst = clocks / 7; /* setup time */ psht = clocks / 7; /* hold time */ psl = (clocks * 5) / 7; /* strobe length */ psst += clocks - (psst + psht + psl); reg = psst << 12; reg |= psl << 7; reg |= psht << 16; return reg; } static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value) { int lsock = container_of(sock, struct socket_info, socket)->slot; struct socket_info *s = &socket[lsock]; unsigned int pipr, reg; pcmconf8xx_t *pcmcia = s->pcmcia; pipr = in_be32(&pcmcia->pcmc_pipr); *value = ((pipr & (M8XX_PCMCIA_CD1(lsock) | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0; *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0; if (s->state.flags & SS_IOCARD) *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_STSCHG : 0; else { *value |= (pipr & M8XX_PCMCIA_RDY(lsock)) ? SS_READY : 0; *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_BATDEAD : 0; *value |= (pipr & M8XX_PCMCIA_BVD2(lsock)) ? SS_BATWARN : 0; } if (s->state.Vcc | s->state.Vpp) *value |= SS_POWERON; /* * Voltage detection: * This driver only supports 16-Bit pc-cards. * Cardbus is not handled here. * * To determine what voltage to use we must read the VS1 and VS2 pin. * Depending on what socket type is present, * different combinations mean different things. * * Card Key Socket Key VS1 VS2 Card Vcc for CIS parse * * 5V 5V, LV* NC NC 5V only 5V (if available) * * 5V 5V, LV* GND NC 5 or 3.3V as low as possible * * 5V 5V, LV* GND GND 5, 3.3, x.xV as low as possible * * LV* 5V - - shall not fit into socket * * LV* LV* GND NC 3.3V only 3.3V * * LV* LV* NC GND x.xV x.xV (if avail.) 
* * LV* LV* GND GND 3.3 or x.xV as low as possible * * *LV means Low Voltage * * * That gives us the following table: * * Socket VS1 VS2 Voltage * * 5V NC NC 5V * 5V NC GND none (should not be possible) * 5V GND NC >= 3.3V * 5V GND GND >= x.xV * * LV NC NC 5V (if available) * LV NC GND x.xV (if available) * LV GND NC 3.3V * LV GND GND >= x.xV * * So, how do I determine if I have a 5V or a LV * socket on my board? Look at the socket! * * * Socket with 5V key: * ++--------------------------------------------+ * || | * || || * || || * | | * +---------------------------------------------+ * * Socket with LV key: * ++--------------------------------------------+ * || | * | || * | || * | | * +---------------------------------------------+ * * * With other words - LV only cards does not fit * into the 5V socket! */ /* read out VS1 and VS2 */ reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock)) >> M8XX_PCMCIA_VS_SHIFT(lsock); if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) { switch (reg) { case 1: *value |= SS_3VCARD; break; /* GND, NC - 3.3V only */ case 2: *value |= SS_XVCARD; break; /* NC. GND - x.xV only */ }; } dprintk("GetStatus(%d) = %#2.2x\n", lsock, *value); return 0; } static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state) { int lsock = container_of(sock, struct socket_info, socket)->slot; struct socket_info *s = &socket[lsock]; struct event_table *e; unsigned int reg; unsigned long flags; pcmconf8xx_t *pcmcia = socket[0].pcmcia; dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); /* First, set voltage - bail out if invalid */ if (voltage_set(lsock, state->Vcc, state->Vpp)) return -EINVAL; /* Take care of reset... */ if (state->flags & SS_RESET) out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */ else out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET); /* ... 
and output enable. */ /* The CxOE signal is connected to a 74541 on the ADS. I guess most other boards used the ADS as a reference. I tried to control the CxOE signal with SS_OUTPUT_ENA, but the reset signal seems connected via the 541. If the CxOE is left high are some signals tristated and no pullups are present -> the cards act weird. So right now the buffers are enabled if the power is on. */ if (state->Vcc || state->Vpp) out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */ else out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE); /* * We'd better turn off interrupts before * we mess with the events-table.. */ spin_lock_irqsave(&events_lock, flags); /* * Play around with the interrupt mask to be able to * give the events the generic pcmcia driver wants us to. */ e = &s->events[0]; reg = 0; if (state->csc_mask & SS_DETECT) { e->eventbit = SS_DETECT; reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock) | M8XX_PCMCIA_CD1(lsock)); e++; } if (state->flags & SS_IOCARD) { /* * I/O card */ if (state->csc_mask & SS_STSCHG) { e->eventbit = SS_STSCHG; reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); e++; } /* * If io_irq is non-zero we should enable irq. */ if (state->io_irq) { out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(s->hwirq) << 24); /* * Strange thing here: * The manual does not tell us which interrupt * the sources generate. * Anyhow, I found out that RDY_L generates IREQLVL. * * We use level triggerd interrupts, and they don't * have to be cleared in PSCR in the interrupt handler. 
*/ reg |= M8XX_PCMCIA_RDY_L(lsock); } else out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff); } else { /* * Memory card */ if (state->csc_mask & SS_BATDEAD) { e->eventbit = SS_BATDEAD; reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); e++; } if (state->csc_mask & SS_BATWARN) { e->eventbit = SS_BATWARN; reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock); e++; } /* What should I trigger on - low/high,raise,fall? */ if (state->csc_mask & SS_READY) { e->eventbit = SS_READY; reg |= e->regbit = 0; //?? e++; } } e->regbit = 0; /* terminate list */ /* * Clear the status changed . * Port A and Port B share the same port. * Writing ones will clear the bits. */ out_be32(&pcmcia->pcmc_pscr, reg); /* * Write the mask. * Port A and Port B share the same port. * Need for read-modify-write. * Ones will enable the interrupt. */ reg |= in_be32(&pcmcia-> pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); out_be32(&pcmcia->pcmc_per, reg); spin_unlock_irqrestore(&events_lock, flags); /* copy the struct and modify the copy */ s->state = *state; return 0; } static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) { int lsock = container_of(sock, struct socket_info, socket)->slot; struct socket_info *s = &socket[lsock]; struct pcmcia_win *w; unsigned int reg, winnr; pcmconf8xx_t *pcmcia = s->pcmcia; #define M8XX_SIZE (io->stop - io->start + 1) #define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, " "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1) return -EINVAL; if (io->flags & MAP_ACTIVE) { dprintk("io->flags & MAP_ACTIVE\n"); winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) + (lsock * PCMCIA_IO_WIN_NO) + io->map; /* setup registers */ w = (void *)&pcmcia->pcmc_pbr0; w += 
winnr; out_be32(&w->or, 0); /* turn off window first */ out_be32(&w->br, M8XX_BASE); reg <<= 27; reg |= M8XX_PCMCIA_POR_IO | (lsock << 2); reg |= m8xx_get_speed(io->speed, 1, s->bus_freq); if (io->flags & MAP_WRPROT) reg |= M8XX_PCMCIA_POR_WRPROT; /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */ if (io->flags & MAP_16BIT) reg |= M8XX_PCMCIA_POR_16BIT; if (io->flags & MAP_ACTIVE) reg |= M8XX_PCMCIA_POR_VALID; out_be32(&w->or, reg); dprintk("Socket %u: Mapped io window %u at %#8.8x, " "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); } else { /* shutdown IO window */ winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) + (lsock * PCMCIA_IO_WIN_NO) + io->map; /* setup registers */ w = (void *)&pcmcia->pcmc_pbr0; w += winnr; out_be32(&w->or, 0); /* turn off window */ out_be32(&w->br, 0); /* turn off base address */ dprintk("Socket %u: Unmapped io window %u at %#8.8x, " "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); } /* copy the struct and modify the copy */ s->io_win[io->map] = *io; s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); dprintk("SetIOMap exit\n"); return 0; } static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) { int lsock = container_of(sock, struct socket_info, socket)->slot; struct socket_info *s = &socket[lsock]; struct pcmcia_win *w; struct pccard_mem_map *old; unsigned int reg, winnr; pcmconf8xx_t *pcmcia = s->pcmcia; dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->static_start, mem->card_start); if ((mem->map >= PCMCIA_MEM_WIN_NO) // || ((mem->s) >= PCMCIA_MEM_WIN_SIZE) || (mem->card_start >= 0x04000000) || (mem->static_start & 0xfff) /* 4KByte resolution */ ||(mem->card_start & 0xfff)) return -EINVAL; if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) { printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE); return -EINVAL; } reg <<= 27; winnr = (lsock * PCMCIA_MEM_WIN_NO) + mem->map; /* Setup the window in the 
pcmcia controller */ w = (void *)&pcmcia->pcmc_pbr0; w += winnr; reg |= lsock << 2; reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq); if (mem->flags & MAP_ATTRIB) reg |= M8XX_PCMCIA_POR_ATTRMEM; if (mem->flags & MAP_WRPROT) reg |= M8XX_PCMCIA_POR_WRPROT; if (mem->flags & MAP_16BIT) reg |= M8XX_PCMCIA_POR_16BIT; if (mem->flags & MAP_ACTIVE) reg |= M8XX_PCMCIA_POR_VALID; out_be32(&w->or, reg); dprintk("Socket %u: Mapped memory window %u at %#8.8x, " "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); if (mem->flags & MAP_ACTIVE) { /* get the new base address */ mem->static_start = PCMCIA_MEM_WIN_BASE + (PCMCIA_MEM_WIN_SIZE * winnr) + mem->card_start; } dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->static_start, mem->card_start); /* copy the struct and modify the copy */ old = &s->mem_win[mem->map]; *old = *mem; old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); return 0; } static int m8xx_sock_init(struct pcmcia_socket *sock) { int i; pccard_io_map io = { 0, 0, 0, 0, 1 }; pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; dprintk("sock_init(%d)\n", s); m8xx_set_socket(sock, &dead_socket); for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { io.map = i; m8xx_set_io_map(sock, &io); } for (i = 0; i < PCMCIA_MEM_WIN_NO; i++) { mem.map = i; m8xx_set_mem_map(sock, &mem); } return 0; } static int m8xx_sock_suspend(struct pcmcia_socket *sock) { return m8xx_set_socket(sock, &dead_socket); } static struct pccard_operations m8xx_services = { .init = m8xx_sock_init, .suspend = m8xx_sock_suspend, .get_status = m8xx_get_status, .set_socket = m8xx_set_socket, .set_io_map = m8xx_set_io_map, .set_mem_map = m8xx_set_mem_map, }; static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id *match) { struct pcmcia_win *w; unsigned int i, m, hwirq; pcmconf8xx_t *pcmcia; int status; struct device_node *np = ofdev->node; pcmcia_info("%s\n", version); pcmcia = of_iomap(np, 0); if (pcmcia == 
NULL) return -EINVAL; pcmcia_schlvl = irq_of_parse_and_map(np, 0); hwirq = irq_map[pcmcia_schlvl].hwirq; if (pcmcia_schlvl < 0) { iounmap(pcmcia); return -EINVAL; } m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra; m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb; pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq); /* Configure Status change interrupt */ if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED, driver_name, socket)) { pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n", pcmcia_schlvl); iounmap(pcmcia); return -1; } w = (void *)&pcmcia->pcmc_pbr0; out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); /* connect interrupt and disable CxOE */ out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); /* intialize the fixed memory windows */ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { out_be32(&w->br, PCMCIA_MEM_WIN_BASE + (PCMCIA_MEM_WIN_SIZE * (m + i * PCMCIA_MEM_WIN_NO))); out_be32(&w->or, 0); /* set to not valid */ w++; } } /* turn off voltage */ voltage_set(0, 0, 0); voltage_set(1, 0, 0); /* Enable external hardware */ hardware_enable(0); hardware_enable(1); for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { socket[i].slot = i; socket[i].socket.owner = THIS_MODULE; socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP; socket[i].socket.irq_mask = 0x000; socket[i].socket.map_size = 0x1000; socket[i].socket.io_offset = 0; socket[i].socket.pci_irq = pcmcia_schlvl; socket[i].socket.ops = &m8xx_services; socket[i].socket.resource_ops = &pccard_nonstatic_ops; socket[i].socket.cb_dev = NULL; socket[i].socket.dev.parent = &ofdev->dev; socket[i].pcmcia = pcmcia; socket[i].bus_freq = ppc_proc_freq; socket[i].hwirq = hwirq; } for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { status = 
pcmcia_register_socket(&socket[i].socket); if (status < 0) pcmcia_error("Socket register failed\n"); } return 0; } static int m8xx_remove(struct of_device *ofdev) { u32 m, i; struct pcmcia_win *w; pcmconf8xx_t *pcmcia = socket[0].pcmcia; for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { w = (void *)&pcmcia->pcmc_pbr0; out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i)); out_be32(&pcmcia->pcmc_per, in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i)); /* turn off interrupt and disable CxOE */ out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE); /* turn off memory windows */ for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { out_be32(&w->or, 0); /* set to not valid */ w++; } /* turn off voltage */ voltage_set(i, 0, 0); /* disable external hardware */ hardware_disable(i); } for (i = 0; i < PCMCIA_SOCKETS_NO; i++) pcmcia_unregister_socket(&socket[i].socket); iounmap(pcmcia); free_irq(pcmcia_schlvl, NULL); return 0; } #ifdef CONFIG_PM static int m8xx_suspend(struct platform_device *pdev, pm_message_t state) { return pcmcia_socket_dev_suspend(&pdev->dev); } static int m8xx_resume(struct platform_device *pdev) { return pcmcia_socket_dev_resume(&pdev->dev); } #else #define m8xx_suspend NULL #define m8xx_resume NULL #endif static struct of_device_id m8xx_pcmcia_match[] = { { .type = "pcmcia", .compatible = "fsl,pq-pcmcia", }, {}, }; MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); static struct of_platform_driver m8xx_pcmcia_driver = { .name = driver_name, .match_table = m8xx_pcmcia_match, .probe = m8xx_probe, .remove = m8xx_remove, .suspend = m8xx_suspend, .resume = m8xx_resume, }; static int __init m8xx_init(void) { return of_register_platform_driver(&m8xx_pcmcia_driver); } static void __exit m8xx_exit(void) { of_unregister_platform_driver(&m8xx_pcmcia_driver); } module_init(m8xx_init); module_exit(m8xx_exit);
gpl-2.0
scjen/rts-pj2
drivers/hwmon/dme1737.c
505
75410
/* * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and * SCH5027 Super-I/O chips integrated hardware monitoring features. * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com> * * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access * the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus * if a SCH311x chip is found. Both types of chips have very similar hardware * monitoring capabilities but differ in the way they can be accessed. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/acpi.h> #include <linux/io.h> /* ISA device, if found */ static struct platform_device *pdev; /* Module load parameters */ static int force_start; module_param(force_start, bool, 0); MODULE_PARM_DESC(force_start, "Force the chip to start monitoring inputs"); static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); static int probe_all_addr; module_param(probe_all_addr, bool, 0); MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC " "addresses"); /* Addresses to scan */ static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; /* Insmod parameters */ I2C_CLIENT_INSMOD_2(dme1737, sch5027); /* ISA chip types */ enum isa_chips { sch311x = sch5027 + 1 }; /* --------------------------------------------------------------------- * Registers * * The sensors are defined as follows: * * Voltages Temperatures * -------- ------------ * in0 +5VTR (+5V stdby) temp1 Remote diode 1 * in1 Vccp (proc core) temp2 Internal temp * in2 VCC (internal +3.3V) temp3 Remote diode 2 * in3 +5V * in4 +12V * in5 VTR (+3.3V stby) * in6 Vbat * * --------------------------------------------------------------------- */ /* Voltages (in) numbered 0-6 (ix) */ #define DME1737_REG_IN(ix) ((ix) < 5 ? 0x20 + (ix) \ : 0x94 + (ix)) #define DME1737_REG_IN_MIN(ix) ((ix) < 5 ? 0x44 + (ix) * 2 \ : 0x91 + (ix) * 2) #define DME1737_REG_IN_MAX(ix) ((ix) < 5 ? 
0x45 + (ix) * 2 \ : 0x92 + (ix) * 2) /* Temperatures (temp) numbered 0-2 (ix) */ #define DME1737_REG_TEMP(ix) (0x25 + (ix)) #define DME1737_REG_TEMP_MIN(ix) (0x4e + (ix) * 2) #define DME1737_REG_TEMP_MAX(ix) (0x4f + (ix) * 2) #define DME1737_REG_TEMP_OFFSET(ix) ((ix) == 0 ? 0x1f \ : 0x1c + (ix)) /* Voltage and temperature LSBs * The LSBs (4 bits each) are stored in 5 registers with the following layouts: * IN_TEMP_LSB(0) = [in5, in6] * IN_TEMP_LSB(1) = [temp3, temp1] * IN_TEMP_LSB(2) = [in4, temp2] * IN_TEMP_LSB(3) = [in3, in0] * IN_TEMP_LSB(4) = [in2, in1] */ #define DME1737_REG_IN_TEMP_LSB(ix) (0x84 + (ix)) static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0}; static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4}; static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1}; static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0}; /* Fans numbered 0-5 (ix) */ #define DME1737_REG_FAN(ix) ((ix) < 4 ? 0x28 + (ix) * 2 \ : 0xa1 + (ix) * 2) #define DME1737_REG_FAN_MIN(ix) ((ix) < 4 ? 0x54 + (ix) * 2 \ : 0xa5 + (ix) * 2) #define DME1737_REG_FAN_OPT(ix) ((ix) < 4 ? 0x90 + (ix) \ : 0xb2 + (ix)) #define DME1737_REG_FAN_MAX(ix) (0xb4 + (ix)) /* only for fan[4-5] */ /* PWMs numbered 0-2, 4-5 (ix) */ #define DME1737_REG_PWM(ix) ((ix) < 3 ? 0x30 + (ix) \ : 0xa1 + (ix)) #define DME1737_REG_PWM_CONFIG(ix) (0x5c + (ix)) /* only for pwm[0-2] */ #define DME1737_REG_PWM_MIN(ix) (0x64 + (ix)) /* only for pwm[0-2] */ #define DME1737_REG_PWM_FREQ(ix) ((ix) < 3 ? 0x5f + (ix) \ : 0xa3 + (ix)) /* The layout of the ramp rate registers is different from the other pwm * registers. 
The bits for the 3 PWMs are stored in 2 registers: * PWM_RR(0) = [OFF3, OFF2, OFF1, RES, RR1E, RR1-2, RR1-1, RR1-0] * PWM_RR(1) = [RR2E, RR2-2, RR2-1, RR2-0, RR3E, RR3-2, RR3-1, RR3-0] */ #define DME1737_REG_PWM_RR(ix) (0x62 + (ix)) /* only for pwm[0-2] */ /* Thermal zones 0-2 */ #define DME1737_REG_ZONE_LOW(ix) (0x67 + (ix)) #define DME1737_REG_ZONE_ABS(ix) (0x6a + (ix)) /* The layout of the hysteresis registers is different from the other zone * registers. The bits for the 3 zones are stored in 2 registers: * ZONE_HYST(0) = [H1-3, H1-2, H1-1, H1-0, H2-3, H2-2, H2-1, H2-0] * ZONE_HYST(1) = [H3-3, H3-2, H3-1, H3-0, RES, RES, RES, RES] */ #define DME1737_REG_ZONE_HYST(ix) (0x6d + (ix)) /* Alarm registers and bit mapping * The 3 8-bit alarm registers will be concatenated to a single 32-bit * alarm value [0, ALARM3, ALARM2, ALARM1]. */ #define DME1737_REG_ALARM1 0x41 #define DME1737_REG_ALARM2 0x42 #define DME1737_REG_ALARM3 0x83 static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17}; static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6}; static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23}; /* Miscellaneous registers */ #define DME1737_REG_DEVICE 0x3d #define DME1737_REG_COMPANY 0x3e #define DME1737_REG_VERSTEP 0x3f #define DME1737_REG_CONFIG 0x40 #define DME1737_REG_CONFIG2 0x7f #define DME1737_REG_VID 0x43 #define DME1737_REG_TACH_PWM 0x81 /* --------------------------------------------------------------------- * Misc defines * --------------------------------------------------------------------- */ /* Chip identification */ #define DME1737_COMPANY_SMSC 0x5c #define DME1737_VERSTEP 0x88 #define DME1737_VERSTEP_MASK 0xf8 #define SCH311X_DEVICE 0x8c #define SCH5027_VERSTEP 0x69 /* Length of ISA address segment */ #define DME1737_EXTENT 2 /* --------------------------------------------------------------------- * Data structures and manipulation thereof * --------------------------------------------------------------------- */ struct dme1737_data 
{ struct i2c_client *client; /* for I2C devices only */ struct device *hwmon_dev; const char *name; unsigned int addr; /* for ISA devices only */ struct mutex update_lock; int valid; /* !=0 if following fields are valid */ unsigned long last_update; /* in jiffies */ unsigned long last_vbat; /* in jiffies */ enum chips type; const int *in_nominal; /* pointer to IN_NOMINAL array */ u8 vid; u8 pwm_rr_en; u8 has_pwm; u8 has_fan; /* Register values */ u16 in[7]; u8 in_min[7]; u8 in_max[7]; s16 temp[3]; s8 temp_min[3]; s8 temp_max[3]; s8 temp_offset[3]; u8 config; u8 config2; u8 vrm; u16 fan[6]; u16 fan_min[6]; u8 fan_max[2]; u8 fan_opt[6]; u8 pwm[6]; u8 pwm_min[3]; u8 pwm_config[3]; u8 pwm_acz[3]; u8 pwm_freq[6]; u8 pwm_rr[2]; u8 zone_low[3]; u8 zone_abs[3]; u8 zone_hyst[2]; u32 alarms; }; /* Nominal voltage values */ static const int IN_NOMINAL_DME1737[] = {5000, 2250, 3300, 5000, 12000, 3300, 3300}; static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300, 3300}; static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300, 3300}; #define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \ (type) == sch5027 ? IN_NOMINAL_SCH5027 : \ IN_NOMINAL_DME1737) /* Voltage input * Voltage inputs have 16 bits resolution, limit values have 8 bits * resolution. */ static inline int IN_FROM_REG(int reg, int nominal, int res) { return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2)); } static inline int IN_TO_REG(int val, int nominal) { return SENSORS_LIMIT((val * 192 + nominal / 2) / nominal, 0, 255); } /* Temperature input * The register values represent temperatures in 2's complement notation from * -127 degrees C to +127 degrees C. Temp inputs have 16 bits resolution, limit * values have 8 bits resolution. */ static inline int TEMP_FROM_REG(int reg, int res) { return (reg * 1000) >> (res - 8); } static inline int TEMP_TO_REG(int val) { return SENSORS_LIMIT((val < 0 ? 
val - 500 : val + 500) / 1000, -128, 127); } /* Temperature range */ static const int TEMP_RANGE[] = {2000, 2500, 3333, 4000, 5000, 6666, 8000, 10000, 13333, 16000, 20000, 26666, 32000, 40000, 53333, 80000}; static inline int TEMP_RANGE_FROM_REG(int reg) { return TEMP_RANGE[(reg >> 4) & 0x0f]; } static int TEMP_RANGE_TO_REG(int val, int reg) { int i; for (i = 15; i > 0; i--) { if (val > (TEMP_RANGE[i] + TEMP_RANGE[i - 1] + 1) / 2) { break; } } return (reg & 0x0f) | (i << 4); } /* Temperature hysteresis * Register layout: * reg[0] = [H1-3, H1-2, H1-1, H1-0, H2-3, H2-2, H2-1, H2-0] * reg[1] = [H3-3, H3-2, H3-1, H3-0, xxxx, xxxx, xxxx, xxxx] */ static inline int TEMP_HYST_FROM_REG(int reg, int ix) { return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000; } static inline int TEMP_HYST_TO_REG(int val, int ix, int reg) { int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15); return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4); } /* Fan input RPM */ static inline int FAN_FROM_REG(int reg, int tpc) { if (tpc) { return tpc * reg; } else { return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg; } } static inline int FAN_TO_REG(int val, int tpc) { if (tpc) { return SENSORS_LIMIT(val / tpc, 0, 0xffff); } else { return (val <= 0) ? 0xffff : SENSORS_LIMIT(90000 * 60 / val, 0, 0xfffe); } } /* Fan TPC (tach pulse count) * Converts a register value to a TPC multiplier or returns 0 if the tachometer * is configured in legacy (non-tpc) mode */ static inline int FAN_TPC_FROM_REG(int reg) { return (reg & 0x20) ? 0 : 60 >> (reg & 0x03); } /* Fan type * The type of a fan is expressed in number of pulses-per-revolution that it * emits */ static inline int FAN_TYPE_FROM_REG(int reg) { int edge = (reg >> 1) & 0x03; return (edge > 0) ? 1 << (edge - 1) : 0; } static inline int FAN_TYPE_TO_REG(int val, int reg) { int edge = (val == 4) ? 
3 : val; return (reg & 0xf9) | (edge << 1); } /* Fan max RPM */ static const int FAN_MAX[] = {0x54, 0x38, 0x2a, 0x21, 0x1c, 0x18, 0x15, 0x12, 0x11, 0x0f, 0x0e}; static int FAN_MAX_FROM_REG(int reg) { int i; for (i = 10; i > 0; i--) { if (reg == FAN_MAX[i]) { break; } } return 1000 + i * 500; } static int FAN_MAX_TO_REG(int val) { int i; for (i = 10; i > 0; i--) { if (val > (1000 + (i - 1) * 500)) { break; } } return FAN_MAX[i]; } /* PWM enable * Register to enable mapping: * 000: 2 fan on zone 1 auto * 001: 2 fan on zone 2 auto * 010: 2 fan on zone 3 auto * 011: 0 fan full on * 100: -1 fan disabled * 101: 2 fan on hottest of zones 2,3 auto * 110: 2 fan on hottest of zones 1,2,3 auto * 111: 1 fan in manual mode */ static inline int PWM_EN_FROM_REG(int reg) { static const int en[] = {2, 2, 2, 0, -1, 2, 2, 1}; return en[(reg >> 5) & 0x07]; } static inline int PWM_EN_TO_REG(int val, int reg) { int en = (val == 1) ? 7 : 3; return (reg & 0x1f) | ((en & 0x07) << 5); } /* PWM auto channels zone * Register to auto channels zone mapping (ACZ is a bitfield with bit x * corresponding to zone x+1): * 000: 001 fan on zone 1 auto * 001: 010 fan on zone 2 auto * 010: 100 fan on zone 3 auto * 011: 000 fan full on * 100: 000 fan disabled * 101: 110 fan on hottest of zones 2,3 auto * 110: 111 fan on hottest of zones 1,2,3 auto * 111: 000 fan in manual mode */ static inline int PWM_ACZ_FROM_REG(int reg) { static const int acz[] = {1, 2, 4, 0, 0, 6, 7, 0}; return acz[(reg >> 5) & 0x07]; } static inline int PWM_ACZ_TO_REG(int val, int reg) { int acz = (val == 4) ? 2 : val - 1; return (reg & 0x1f) | ((acz & 0x07) << 5); } /* PWM frequency */ static const int PWM_FREQ[] = {11, 15, 22, 29, 35, 44, 59, 88, 15000, 20000, 30000, 25000, 0, 0, 0, 0}; static inline int PWM_FREQ_FROM_REG(int reg) { return PWM_FREQ[reg & 0x0f]; } static int PWM_FREQ_TO_REG(int val, int reg) { int i; /* the first two cases are special - stupid chip design! 
*/ if (val > 27500) { i = 10; } else if (val > 22500) { i = 11; } else { for (i = 9; i > 0; i--) { if (val > (PWM_FREQ[i] + PWM_FREQ[i - 1] + 1) / 2) { break; } } } return (reg & 0xf0) | i; } /* PWM ramp rate * Register layout: * reg[0] = [OFF3, OFF2, OFF1, RES, RR1-E, RR1-2, RR1-1, RR1-0] * reg[1] = [RR2-E, RR2-2, RR2-1, RR2-0, RR3-E, RR3-2, RR3-1, RR3-0] */ static const u8 PWM_RR[] = {206, 104, 69, 41, 26, 18, 10, 5}; static inline int PWM_RR_FROM_REG(int reg, int ix) { int rr = (ix == 1) ? reg >> 4 : reg; return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0; } static int PWM_RR_TO_REG(int val, int ix, int reg) { int i; for (i = 0; i < 7; i++) { if (val > (PWM_RR[i] + PWM_RR[i + 1] + 1) / 2) { break; } } return (ix == 1) ? (reg & 0x8f) | (i << 4) : (reg & 0xf8) | i; } /* PWM ramp rate enable */ static inline int PWM_RR_EN_FROM_REG(int reg, int ix) { return PWM_RR_FROM_REG(reg, ix) ? 1 : 0; } static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg) { int en = (ix == 1) ? 0x80 : 0x08; return val ? reg | en : reg & ~en; } /* PWM min/off * The PWM min/off bits are part of the PMW ramp rate register 0 (see above for * the register layout). */ static inline int PWM_OFF_FROM_REG(int reg, int ix) { return (reg >> (ix + 5)) & 0x01; } static inline int PWM_OFF_TO_REG(int val, int ix, int reg) { return (reg & ~(1 << (ix + 5))) | ((val & 0x01) << (ix + 5)); } /* --------------------------------------------------------------------- * Device I/O access * * ISA access is performed through an index/data register pair and needs to * be protected by a mutex during runtime (not required for initialization). * We use data->update_lock for this and need to ensure that we acquire it * before calling dme1737_read or dme1737_write. 
* --------------------------------------------------------------------- */ static u8 dme1737_read(const struct dme1737_data *data, u8 reg) { struct i2c_client *client = data->client; s32 val; if (client) { /* I2C device */ val = i2c_smbus_read_byte_data(client, reg); if (val < 0) { dev_warn(&client->dev, "Read from register " "0x%02x failed! Please report to the driver " "maintainer.\n", reg); } } else { /* ISA device */ outb(reg, data->addr); val = inb(data->addr + 1); } return val; } static s32 dme1737_write(const struct dme1737_data *data, u8 reg, u8 val) { struct i2c_client *client = data->client; s32 res = 0; if (client) { /* I2C device */ res = i2c_smbus_write_byte_data(client, reg, val); if (res < 0) { dev_warn(&client->dev, "Write to register " "0x%02x failed! Please report to the driver " "maintainer.\n", reg); } } else { /* ISA device */ outb(reg, data->addr); outb(val, data->addr + 1); } return res; } static struct dme1737_data *dme1737_update_device(struct device *dev) { struct dme1737_data *data = dev_get_drvdata(dev); int ix; u8 lsb[5]; mutex_lock(&data->update_lock); /* Enable a Vbat monitoring cycle every 10 mins */ if (time_after(jiffies, data->last_vbat + 600 * HZ) || !data->valid) { dme1737_write(data, DME1737_REG_CONFIG, dme1737_read(data, DME1737_REG_CONFIG) | 0x10); data->last_vbat = jiffies; } /* Sample register contents every 1 sec */ if (time_after(jiffies, data->last_update + HZ) || !data->valid) { if (data->type == dme1737) { data->vid = dme1737_read(data, DME1737_REG_VID) & 0x3f; } /* In (voltage) registers */ for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) { /* Voltage inputs are stored as 16 bit values even * though they have only 12 bits resolution. This is * to make it consistent with the temp inputs. 
*/ data->in[ix] = dme1737_read(data, DME1737_REG_IN(ix)) << 8; data->in_min[ix] = dme1737_read(data, DME1737_REG_IN_MIN(ix)); data->in_max[ix] = dme1737_read(data, DME1737_REG_IN_MAX(ix)); } /* Temp registers */ for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) { /* Temp inputs are stored as 16 bit values even * though they have only 12 bits resolution. This is * to take advantage of implicit conversions between * register values (2's complement) and temp values * (signed decimal). */ data->temp[ix] = dme1737_read(data, DME1737_REG_TEMP(ix)) << 8; data->temp_min[ix] = dme1737_read(data, DME1737_REG_TEMP_MIN(ix)); data->temp_max[ix] = dme1737_read(data, DME1737_REG_TEMP_MAX(ix)); if (data->type != sch5027) { data->temp_offset[ix] = dme1737_read(data, DME1737_REG_TEMP_OFFSET(ix)); } } /* In and temp LSB registers * The LSBs are latched when the MSBs are read, so the order in * which the registers are read (MSB first, then LSB) is * important! */ for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) { lsb[ix] = dme1737_read(data, DME1737_REG_IN_TEMP_LSB(ix)); } for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) { data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] << DME1737_REG_IN_LSB_SHL[ix]) & 0xf0; } for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) { data->temp[ix] |= (lsb[DME1737_REG_TEMP_LSB[ix]] << DME1737_REG_TEMP_LSB_SHL[ix]) & 0xf0; } /* Fan registers */ for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) { /* Skip reading registers if optional fans are not * present */ if (!(data->has_fan & (1 << ix))) { continue; } data->fan[ix] = dme1737_read(data, DME1737_REG_FAN(ix)); data->fan[ix] |= dme1737_read(data, DME1737_REG_FAN(ix) + 1) << 8; data->fan_min[ix] = dme1737_read(data, DME1737_REG_FAN_MIN(ix)); data->fan_min[ix] |= dme1737_read(data, DME1737_REG_FAN_MIN(ix) + 1) << 8; data->fan_opt[ix] = dme1737_read(data, DME1737_REG_FAN_OPT(ix)); /* fan_max exists only for fan[5-6] */ if (ix > 3) { data->fan_max[ix - 4] = dme1737_read(data, DME1737_REG_FAN_MAX(ix)); } } /* PWM registers */ for (ix 
= 0; ix < ARRAY_SIZE(data->pwm); ix++) { /* Skip reading registers if optional PWMs are not * present */ if (!(data->has_pwm & (1 << ix))) { continue; } data->pwm[ix] = dme1737_read(data, DME1737_REG_PWM(ix)); data->pwm_freq[ix] = dme1737_read(data, DME1737_REG_PWM_FREQ(ix)); /* pwm_config and pwm_min exist only for pwm[1-3] */ if (ix < 3) { data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); data->pwm_min[ix] = dme1737_read(data, DME1737_REG_PWM_MIN(ix)); } } for (ix = 0; ix < ARRAY_SIZE(data->pwm_rr); ix++) { data->pwm_rr[ix] = dme1737_read(data, DME1737_REG_PWM_RR(ix)); } /* Thermal zone registers */ for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) { data->zone_low[ix] = dme1737_read(data, DME1737_REG_ZONE_LOW(ix)); data->zone_abs[ix] = dme1737_read(data, DME1737_REG_ZONE_ABS(ix)); } if (data->type != sch5027) { for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) { data->zone_hyst[ix] = dme1737_read(data, DME1737_REG_ZONE_HYST(ix)); } } /* Alarm registers */ data->alarms = dme1737_read(data, DME1737_REG_ALARM1); /* Bit 7 tells us if the other alarm registers are non-zero and * therefore also need to be read */ if (data->alarms & 0x80) { data->alarms |= dme1737_read(data, DME1737_REG_ALARM2) << 8; data->alarms |= dme1737_read(data, DME1737_REG_ALARM3) << 16; } /* The ISA chips require explicit clearing of alarm bits. 
* Don't worry, an alarm will come back if the condition * that causes it still exists */ if (!data->client) { if (data->alarms & 0xff0000) { dme1737_write(data, DME1737_REG_ALARM3, 0xff); } if (data->alarms & 0xff00) { dme1737_write(data, DME1737_REG_ALARM2, 0xff); } if (data->alarms & 0xff) { dme1737_write(data, DME1737_REG_ALARM1, 0xff); } } data->last_update = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* --------------------------------------------------------------------- * Voltage sysfs attributes * ix = [0-5] * --------------------------------------------------------------------- */ #define SYS_IN_INPUT 0 #define SYS_IN_MIN 1 #define SYS_IN_MAX 2 #define SYS_IN_ALARM 3 static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dme1737_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SYS_IN_INPUT: res = IN_FROM_REG(data->in[ix], data->in_nominal[ix], 16); break; case SYS_IN_MIN: res = IN_FROM_REG(data->in_min[ix], data->in_nominal[ix], 8); break; case SYS_IN_MAX: res = IN_FROM_REG(data->in_max[ix], data->in_nominal[ix], 8); break; case SYS_IN_ALARM: res = (data->alarms >> DME1737_BIT_ALARM_IN[ix]) & 0x01; break; default: res = 0; dev_dbg(dev, "Unknown function %d.\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_in(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); switch (fn) { case SYS_IN_MIN: data->in_min[ix] = IN_TO_REG(val, data->in_nominal[ix]); dme1737_write(data, DME1737_REG_IN_MIN(ix), data->in_min[ix]); break; case 
SYS_IN_MAX: data->in_max[ix] = IN_TO_REG(val, data->in_nominal[ix]); dme1737_write(data, DME1737_REG_IN_MAX(ix), data->in_max[ix]); break; default: dev_dbg(dev, "Unknown function %d.\n", fn); } mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Temperature sysfs attributes * ix = [0-2] * --------------------------------------------------------------------- */ #define SYS_TEMP_INPUT 0 #define SYS_TEMP_MIN 1 #define SYS_TEMP_MAX 2 #define SYS_TEMP_OFFSET 3 #define SYS_TEMP_ALARM 4 #define SYS_TEMP_FAULT 5 static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dme1737_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SYS_TEMP_INPUT: res = TEMP_FROM_REG(data->temp[ix], 16); break; case SYS_TEMP_MIN: res = TEMP_FROM_REG(data->temp_min[ix], 8); break; case SYS_TEMP_MAX: res = TEMP_FROM_REG(data->temp_max[ix], 8); break; case SYS_TEMP_OFFSET: res = TEMP_FROM_REG(data->temp_offset[ix], 8); break; case SYS_TEMP_ALARM: res = (data->alarms >> DME1737_BIT_ALARM_TEMP[ix]) & 0x01; break; case SYS_TEMP_FAULT: res = (((u16)data->temp[ix] & 0xff00) == 0x8000); break; default: res = 0; dev_dbg(dev, "Unknown function %d.\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); switch (fn) { case SYS_TEMP_MIN: data->temp_min[ix] = TEMP_TO_REG(val); dme1737_write(data, DME1737_REG_TEMP_MIN(ix), data->temp_min[ix]); break; case SYS_TEMP_MAX: data->temp_max[ix] = 
TEMP_TO_REG(val); dme1737_write(data, DME1737_REG_TEMP_MAX(ix), data->temp_max[ix]); break; case SYS_TEMP_OFFSET: data->temp_offset[ix] = TEMP_TO_REG(val); dme1737_write(data, DME1737_REG_TEMP_OFFSET(ix), data->temp_offset[ix]); break; default: dev_dbg(dev, "Unknown function %d.\n", fn); } mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Zone sysfs attributes * ix = [0-2] * --------------------------------------------------------------------- */ #define SYS_ZONE_AUTO_CHANNELS_TEMP 0 #define SYS_ZONE_AUTO_POINT1_TEMP_HYST 1 #define SYS_ZONE_AUTO_POINT1_TEMP 2 #define SYS_ZONE_AUTO_POINT2_TEMP 3 #define SYS_ZONE_AUTO_POINT3_TEMP 4 static ssize_t show_zone(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dme1737_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SYS_ZONE_AUTO_CHANNELS_TEMP: /* check config2 for non-standard temp-to-zone mapping */ if ((ix == 1) && (data->config2 & 0x02)) { res = 4; } else { res = 1 << ix; } break; case SYS_ZONE_AUTO_POINT1_TEMP_HYST: res = TEMP_FROM_REG(data->zone_low[ix], 8) - TEMP_HYST_FROM_REG(data->zone_hyst[ix == 2], ix); break; case SYS_ZONE_AUTO_POINT1_TEMP: res = TEMP_FROM_REG(data->zone_low[ix], 8); break; case SYS_ZONE_AUTO_POINT2_TEMP: /* pwm_freq holds the temp range bits in the upper nibble */ res = TEMP_FROM_REG(data->zone_low[ix], 8) + TEMP_RANGE_FROM_REG(data->pwm_freq[ix]); break; case SYS_ZONE_AUTO_POINT3_TEMP: res = TEMP_FROM_REG(data->zone_abs[ix], 8); break; default: res = 0; dev_dbg(dev, "Unknown function %d.\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_zone(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = 
to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); switch (fn) { case SYS_ZONE_AUTO_POINT1_TEMP_HYST: /* Refresh the cache */ data->zone_low[ix] = dme1737_read(data, DME1737_REG_ZONE_LOW(ix)); /* Modify the temp hyst value */ data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG( TEMP_FROM_REG(data->zone_low[ix], 8) - val, ix, dme1737_read(data, DME1737_REG_ZONE_HYST(ix == 2))); dme1737_write(data, DME1737_REG_ZONE_HYST(ix == 2), data->zone_hyst[ix == 2]); break; case SYS_ZONE_AUTO_POINT1_TEMP: data->zone_low[ix] = TEMP_TO_REG(val); dme1737_write(data, DME1737_REG_ZONE_LOW(ix), data->zone_low[ix]); break; case SYS_ZONE_AUTO_POINT2_TEMP: /* Refresh the cache */ data->zone_low[ix] = dme1737_read(data, DME1737_REG_ZONE_LOW(ix)); /* Modify the temp range value (which is stored in the upper * nibble of the pwm_freq register) */ data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val - TEMP_FROM_REG(data->zone_low[ix], 8), dme1737_read(data, DME1737_REG_PWM_FREQ(ix))); dme1737_write(data, DME1737_REG_PWM_FREQ(ix), data->pwm_freq[ix]); break; case SYS_ZONE_AUTO_POINT3_TEMP: data->zone_abs[ix] = TEMP_TO_REG(val); dme1737_write(data, DME1737_REG_ZONE_ABS(ix), data->zone_abs[ix]); break; default: dev_dbg(dev, "Unknown function %d.\n", fn); } mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Fan sysfs attributes * ix = [0-5] * --------------------------------------------------------------------- */ #define SYS_FAN_INPUT 0 #define SYS_FAN_MIN 1 #define SYS_FAN_MAX 2 #define SYS_FAN_ALARM 3 #define SYS_FAN_TYPE 4 static ssize_t show_fan(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dme1737_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case 
SYS_FAN_INPUT: res = FAN_FROM_REG(data->fan[ix], ix < 4 ? 0 : FAN_TPC_FROM_REG(data->fan_opt[ix])); break; case SYS_FAN_MIN: res = FAN_FROM_REG(data->fan_min[ix], ix < 4 ? 0 : FAN_TPC_FROM_REG(data->fan_opt[ix])); break; case SYS_FAN_MAX: /* only valid for fan[5-6] */ res = FAN_MAX_FROM_REG(data->fan_max[ix - 4]); break; case SYS_FAN_ALARM: res = (data->alarms >> DME1737_BIT_ALARM_FAN[ix]) & 0x01; break; case SYS_FAN_TYPE: /* only valid for fan[1-4] */ res = FAN_TYPE_FROM_REG(data->fan_opt[ix]); break; default: res = 0; dev_dbg(dev, "Unknown function %d.\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_fan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); switch (fn) { case SYS_FAN_MIN: if (ix < 4) { data->fan_min[ix] = FAN_TO_REG(val, 0); } else { /* Refresh the cache */ data->fan_opt[ix] = dme1737_read(data, DME1737_REG_FAN_OPT(ix)); /* Modify the fan min value */ data->fan_min[ix] = FAN_TO_REG(val, FAN_TPC_FROM_REG(data->fan_opt[ix])); } dme1737_write(data, DME1737_REG_FAN_MIN(ix), data->fan_min[ix] & 0xff); dme1737_write(data, DME1737_REG_FAN_MIN(ix) + 1, data->fan_min[ix] >> 8); break; case SYS_FAN_MAX: /* Only valid for fan[5-6] */ data->fan_max[ix - 4] = FAN_MAX_TO_REG(val); dme1737_write(data, DME1737_REG_FAN_MAX(ix), data->fan_max[ix - 4]); break; case SYS_FAN_TYPE: /* Only valid for fan[1-4] */ if (!(val == 1 || val == 2 || val == 4)) { count = -EINVAL; dev_warn(dev, "Fan type value %ld not " "supported. 
Choose one of 1, 2, or 4.\n", val); goto exit; } data->fan_opt[ix] = FAN_TYPE_TO_REG(val, dme1737_read(data, DME1737_REG_FAN_OPT(ix))); dme1737_write(data, DME1737_REG_FAN_OPT(ix), data->fan_opt[ix]); break; default: dev_dbg(dev, "Unknown function %d.\n", fn); } exit: mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * PWM sysfs attributes * ix = [0-4] * --------------------------------------------------------------------- */ #define SYS_PWM 0 #define SYS_PWM_FREQ 1 #define SYS_PWM_ENABLE 2 #define SYS_PWM_RAMP_RATE 3 #define SYS_PWM_AUTO_CHANNELS_ZONE 4 #define SYS_PWM_AUTO_PWM_MIN 5 #define SYS_PWM_AUTO_POINT1_PWM 6 #define SYS_PWM_AUTO_POINT2_PWM 7 static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dme1737_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SYS_PWM: if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 0) { res = 255; } else { res = data->pwm[ix]; } break; case SYS_PWM_FREQ: res = PWM_FREQ_FROM_REG(data->pwm_freq[ix]); break; case SYS_PWM_ENABLE: if (ix >= 3) { res = 1; /* pwm[5-6] hard-wired to manual mode */ } else { res = PWM_EN_FROM_REG(data->pwm_config[ix]); } break; case SYS_PWM_RAMP_RATE: /* Only valid for pwm[1-3] */ res = PWM_RR_FROM_REG(data->pwm_rr[ix > 0], ix); break; case SYS_PWM_AUTO_CHANNELS_ZONE: /* Only valid for pwm[1-3] */ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) { res = PWM_ACZ_FROM_REG(data->pwm_config[ix]); } else { res = data->pwm_acz[ix]; } break; case SYS_PWM_AUTO_PWM_MIN: /* Only valid for pwm[1-3] */ if (PWM_OFF_FROM_REG(data->pwm_rr[0], ix)) { res = data->pwm_min[ix]; } else { res = 0; } break; case SYS_PWM_AUTO_POINT1_PWM: /* Only valid for pwm[1-3] */ res = data->pwm_min[ix]; break; case SYS_PWM_AUTO_POINT2_PWM: /* Only valid for pwm[1-3] */ res = 255; 
/* hard-wired */ break; default: res = 0; dev_dbg(dev, "Unknown function %d.\n", fn); } return sprintf(buf, "%d\n", res); } static struct attribute *dme1737_pwm_chmod_attr[]; static void dme1737_chmod_file(struct device*, struct attribute*, mode_t); static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); switch (fn) { case SYS_PWM: data->pwm[ix] = SENSORS_LIMIT(val, 0, 255); dme1737_write(data, DME1737_REG_PWM(ix), data->pwm[ix]); break; case SYS_PWM_FREQ: data->pwm_freq[ix] = PWM_FREQ_TO_REG(val, dme1737_read(data, DME1737_REG_PWM_FREQ(ix))); dme1737_write(data, DME1737_REG_PWM_FREQ(ix), data->pwm_freq[ix]); break; case SYS_PWM_ENABLE: /* Only valid for pwm[1-3] */ if (val < 0 || val > 2) { count = -EINVAL; dev_warn(dev, "PWM enable %ld not " "supported. 
Choose one of 0, 1, or 2.\n", val); goto exit; } /* Refresh the cache */ data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); if (val == PWM_EN_FROM_REG(data->pwm_config[ix])) { /* Bail out if no change */ goto exit; } /* Do some housekeeping if we are currently in auto mode */ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) { /* Save the current zone channel assignment */ data->pwm_acz[ix] = PWM_ACZ_FROM_REG( data->pwm_config[ix]); /* Save the current ramp rate state and disable it */ data->pwm_rr[ix > 0] = dme1737_read(data, DME1737_REG_PWM_RR(ix > 0)); data->pwm_rr_en &= ~(1 << ix); if (PWM_RR_EN_FROM_REG(data->pwm_rr[ix > 0], ix)) { data->pwm_rr_en |= (1 << ix); data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(0, ix, data->pwm_rr[ix > 0]); dme1737_write(data, DME1737_REG_PWM_RR(ix > 0), data->pwm_rr[ix > 0]); } } /* Set the new PWM mode */ switch (val) { case 0: /* Change permissions of pwm[ix] to read-only */ dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix], S_IRUGO); /* Turn fan fully on */ data->pwm_config[ix] = PWM_EN_TO_REG(0, data->pwm_config[ix]); dme1737_write(data, DME1737_REG_PWM_CONFIG(ix), data->pwm_config[ix]); break; case 1: /* Turn on manual mode */ data->pwm_config[ix] = PWM_EN_TO_REG(1, data->pwm_config[ix]); dme1737_write(data, DME1737_REG_PWM_CONFIG(ix), data->pwm_config[ix]); /* Change permissions of pwm[ix] to read-writeable */ dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix], S_IRUGO | S_IWUSR); break; case 2: /* Change permissions of pwm[ix] to read-only */ dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix], S_IRUGO); /* Turn on auto mode using the saved zone channel * assignment */ data->pwm_config[ix] = PWM_ACZ_TO_REG( data->pwm_acz[ix], data->pwm_config[ix]); dme1737_write(data, DME1737_REG_PWM_CONFIG(ix), data->pwm_config[ix]); /* Enable PWM ramp rate if previously enabled */ if (data->pwm_rr_en & (1 << ix)) { data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(1, ix, dme1737_read(data, DME1737_REG_PWM_RR(ix > 0))); 
dme1737_write(data, DME1737_REG_PWM_RR(ix > 0), data->pwm_rr[ix > 0]); } break; } break; case SYS_PWM_RAMP_RATE: /* Only valid for pwm[1-3] */ /* Refresh the cache */ data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); data->pwm_rr[ix > 0] = dme1737_read(data, DME1737_REG_PWM_RR(ix > 0)); /* Set the ramp rate value */ if (val > 0) { data->pwm_rr[ix > 0] = PWM_RR_TO_REG(val, ix, data->pwm_rr[ix > 0]); } /* Enable/disable the feature only if the associated PWM * output is in automatic mode. */ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) { data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(val > 0, ix, data->pwm_rr[ix > 0]); } dme1737_write(data, DME1737_REG_PWM_RR(ix > 0), data->pwm_rr[ix > 0]); break; case SYS_PWM_AUTO_CHANNELS_ZONE: /* Only valid for pwm[1-3] */ if (!(val == 1 || val == 2 || val == 4 || val == 6 || val == 7)) { count = -EINVAL; dev_warn(dev, "PWM auto channels zone %ld " "not supported. Choose one of 1, 2, 4, 6, " "or 7.\n", val); goto exit; } /* Refresh the cache */ data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) { /* PWM is already in auto mode so update the temp * channel assignment */ data->pwm_config[ix] = PWM_ACZ_TO_REG(val, data->pwm_config[ix]); dme1737_write(data, DME1737_REG_PWM_CONFIG(ix), data->pwm_config[ix]); } else { /* PWM is not in auto mode so we save the temp * channel assignment for later use */ data->pwm_acz[ix] = val; } break; case SYS_PWM_AUTO_PWM_MIN: /* Only valid for pwm[1-3] */ /* Refresh the cache */ data->pwm_min[ix] = dme1737_read(data, DME1737_REG_PWM_MIN(ix)); /* There are only 2 values supported for the auto_pwm_min * value: 0 or auto_point1_pwm. So if the temperature drops * below the auto_point1_temp_hyst value, the fan either turns * off or runs at auto_point1_pwm duty-cycle. 
*/ if (val > ((data->pwm_min[ix] + 1) / 2)) { data->pwm_rr[0] = PWM_OFF_TO_REG(1, ix, dme1737_read(data, DME1737_REG_PWM_RR(0))); } else { data->pwm_rr[0] = PWM_OFF_TO_REG(0, ix, dme1737_read(data, DME1737_REG_PWM_RR(0))); } dme1737_write(data, DME1737_REG_PWM_RR(0), data->pwm_rr[0]); break; case SYS_PWM_AUTO_POINT1_PWM: /* Only valid for pwm[1-3] */ data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255); dme1737_write(data, DME1737_REG_PWM_MIN(ix), data->pwm_min[ix]); break; default: dev_dbg(dev, "Unknown function %d.\n", fn); } exit: mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Miscellaneous sysfs attributes * --------------------------------------------------------------------- */ static ssize_t show_vrm(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct dme1737_data *data = i2c_get_clientdata(client); return sprintf(buf, "%d\n", data->vrm); } static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); long val = simple_strtol(buf, NULL, 10); data->vrm = val; return count; } static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dme1737_update_device(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct dme1737_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } /* --------------------------------------------------------------------- * Sysfs device attribute defines and structs * --------------------------------------------------------------------- */ /* Voltages 0-6 */ #define SENSOR_DEVICE_ATTR_IN(ix) \ static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \ show_in, NULL, SYS_IN_INPUT, ix); \ static 
SENSOR_DEVICE_ATTR_2(in##ix##_min, S_IRUGO | S_IWUSR, \ show_in, set_in, SYS_IN_MIN, ix); \ static SENSOR_DEVICE_ATTR_2(in##ix##_max, S_IRUGO | S_IWUSR, \ show_in, set_in, SYS_IN_MAX, ix); \ static SENSOR_DEVICE_ATTR_2(in##ix##_alarm, S_IRUGO, \ show_in, NULL, SYS_IN_ALARM, ix) SENSOR_DEVICE_ATTR_IN(0); SENSOR_DEVICE_ATTR_IN(1); SENSOR_DEVICE_ATTR_IN(2); SENSOR_DEVICE_ATTR_IN(3); SENSOR_DEVICE_ATTR_IN(4); SENSOR_DEVICE_ATTR_IN(5); SENSOR_DEVICE_ATTR_IN(6); /* Temperatures 1-3 */ #define SENSOR_DEVICE_ATTR_TEMP(ix) \ static SENSOR_DEVICE_ATTR_2(temp##ix##_input, S_IRUGO, \ show_temp, NULL, SYS_TEMP_INPUT, ix-1); \ static SENSOR_DEVICE_ATTR_2(temp##ix##_min, S_IRUGO | S_IWUSR, \ show_temp, set_temp, SYS_TEMP_MIN, ix-1); \ static SENSOR_DEVICE_ATTR_2(temp##ix##_max, S_IRUGO | S_IWUSR, \ show_temp, set_temp, SYS_TEMP_MAX, ix-1); \ static SENSOR_DEVICE_ATTR_2(temp##ix##_offset, S_IRUGO, \ show_temp, set_temp, SYS_TEMP_OFFSET, ix-1); \ static SENSOR_DEVICE_ATTR_2(temp##ix##_alarm, S_IRUGO, \ show_temp, NULL, SYS_TEMP_ALARM, ix-1); \ static SENSOR_DEVICE_ATTR_2(temp##ix##_fault, S_IRUGO, \ show_temp, NULL, SYS_TEMP_FAULT, ix-1) SENSOR_DEVICE_ATTR_TEMP(1); SENSOR_DEVICE_ATTR_TEMP(2); SENSOR_DEVICE_ATTR_TEMP(3); /* Zones 1-3 */ #define SENSOR_DEVICE_ATTR_ZONE(ix) \ static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_channels_temp, S_IRUGO, \ show_zone, NULL, SYS_ZONE_AUTO_CHANNELS_TEMP, ix-1); \ static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp_hyst, S_IRUGO, \ show_zone, set_zone, SYS_ZONE_AUTO_POINT1_TEMP_HYST, ix-1); \ static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp, S_IRUGO, \ show_zone, set_zone, SYS_ZONE_AUTO_POINT1_TEMP, ix-1); \ static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point2_temp, S_IRUGO, \ show_zone, set_zone, SYS_ZONE_AUTO_POINT2_TEMP, ix-1); \ static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point3_temp, S_IRUGO, \ show_zone, set_zone, SYS_ZONE_AUTO_POINT3_TEMP, ix-1) SENSOR_DEVICE_ATTR_ZONE(1); SENSOR_DEVICE_ATTR_ZONE(2); SENSOR_DEVICE_ATTR_ZONE(3); /* 
Fans 1-4 */ #define SENSOR_DEVICE_ATTR_FAN_1TO4(ix) \ static SENSOR_DEVICE_ATTR_2(fan##ix##_input, S_IRUGO, \ show_fan, NULL, SYS_FAN_INPUT, ix-1); \ static SENSOR_DEVICE_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \ show_fan, set_fan, SYS_FAN_MIN, ix-1); \ static SENSOR_DEVICE_ATTR_2(fan##ix##_alarm, S_IRUGO, \ show_fan, NULL, SYS_FAN_ALARM, ix-1); \ static SENSOR_DEVICE_ATTR_2(fan##ix##_type, S_IRUGO | S_IWUSR, \ show_fan, set_fan, SYS_FAN_TYPE, ix-1) SENSOR_DEVICE_ATTR_FAN_1TO4(1); SENSOR_DEVICE_ATTR_FAN_1TO4(2); SENSOR_DEVICE_ATTR_FAN_1TO4(3); SENSOR_DEVICE_ATTR_FAN_1TO4(4); /* Fans 5-6 */ #define SENSOR_DEVICE_ATTR_FAN_5TO6(ix) \ static SENSOR_DEVICE_ATTR_2(fan##ix##_input, S_IRUGO, \ show_fan, NULL, SYS_FAN_INPUT, ix-1); \ static SENSOR_DEVICE_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \ show_fan, set_fan, SYS_FAN_MIN, ix-1); \ static SENSOR_DEVICE_ATTR_2(fan##ix##_alarm, S_IRUGO, \ show_fan, NULL, SYS_FAN_ALARM, ix-1); \ static SENSOR_DEVICE_ATTR_2(fan##ix##_max, S_IRUGO | S_IWUSR, \ show_fan, set_fan, SYS_FAN_MAX, ix-1) SENSOR_DEVICE_ATTR_FAN_5TO6(5); SENSOR_DEVICE_ATTR_FAN_5TO6(6); /* PWMs 1-3 */ #define SENSOR_DEVICE_ATTR_PWM_1TO3(ix) \ static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_ENABLE, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_ramp_rate, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_RAMP_RATE, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_channels_zone, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_AUTO_CHANNELS_ZONE, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_pwm_min, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_AUTO_PWM_MIN, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_point1_pwm, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_AUTO_POINT1_PWM, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_point2_pwm, S_IRUGO, \ show_pwm, NULL, 
SYS_PWM_AUTO_POINT2_PWM, ix-1) SENSOR_DEVICE_ATTR_PWM_1TO3(1); SENSOR_DEVICE_ATTR_PWM_1TO3(2); SENSOR_DEVICE_ATTR_PWM_1TO3(3); /* PWMs 5-6 */ #define SENSOR_DEVICE_ATTR_PWM_5TO6(ix) \ static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO, \ show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \ static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \ show_pwm, NULL, SYS_PWM_ENABLE, ix-1) SENSOR_DEVICE_ATTR_PWM_5TO6(5); SENSOR_DEVICE_ATTR_PWM_5TO6(6); /* Misc */ static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm); static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL); static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* for ISA devices */ /* This struct holds all the attributes that are always present and need to be * created unconditionally. The attributes that need modification of their * permissions are created read-only and write permissions are added or removed * on the fly when required */ static struct attribute *dme1737_attr[] ={ /* Voltages */ &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, 
&sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, /* Temperatures */ &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp1_fault.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, /* Zones */ &sensor_dev_attr_zone1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone1_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone1_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr, NULL }; static const struct attribute_group dme1737_group = { .attrs = dme1737_attr, }; /* The following struct holds misc attributes, which are not available in all * chips. Their creation depends on the chip type which is determined during * module load. 
*/ static struct attribute *dme1737_misc_attr[] = { /* Temperatures */ &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, /* Zones */ &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, NULL }; static const struct attribute_group dme1737_misc_group = { .attrs = dme1737_misc_attr, }; /* The following struct holds VID-related attributes. Their creation depends on the chip type which is determined during module load. */ static struct attribute *dme1737_vid_attr[] = { &dev_attr_vrm.attr, &dev_attr_cpu0_vid.attr, NULL }; static const struct attribute_group dme1737_vid_group = { .attrs = dme1737_vid_attr, }; /* The following structs hold the PWM attributes, some of which are optional. * Their creation depends on the chip configuration which is determined during * module load. */ static struct attribute *dme1737_pwm1_attr[] = { &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm1_ramp_rate.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels_zone.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm2_attr[] = { &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm2_ramp_rate.dev_attr.attr, &sensor_dev_attr_pwm2_auto_channels_zone.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm3_attr[] = { &sensor_dev_attr_pwm3.dev_attr.attr, &sensor_dev_attr_pwm3_freq.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, &sensor_dev_attr_pwm3_ramp_rate.dev_attr.attr, 
&sensor_dev_attr_pwm3_auto_channels_zone.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm5_attr[] = { &sensor_dev_attr_pwm5.dev_attr.attr, &sensor_dev_attr_pwm5_freq.dev_attr.attr, &sensor_dev_attr_pwm5_enable.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm6_attr[] = { &sensor_dev_attr_pwm6.dev_attr.attr, &sensor_dev_attr_pwm6_freq.dev_attr.attr, &sensor_dev_attr_pwm6_enable.dev_attr.attr, NULL }; static const struct attribute_group dme1737_pwm_group[] = { { .attrs = dme1737_pwm1_attr }, { .attrs = dme1737_pwm2_attr }, { .attrs = dme1737_pwm3_attr }, { .attrs = NULL }, { .attrs = dme1737_pwm5_attr }, { .attrs = dme1737_pwm6_attr }, }; /* The following struct holds misc PWM attributes, which are not available in * all chips. Their creation depends on the chip type which is determined * during module load. */ static struct attribute *dme1737_pwm_misc_attr[] = { &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr, }; /* The following structs hold the fan attributes, some of which are optional. * Their creation depends on the chip configuration which is determined during * module load. 
*/ static struct attribute *dme1737_fan1_attr[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan1_type.dev_attr.attr, NULL }; static struct attribute *dme1737_fan2_attr[] = { &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan2_type.dev_attr.attr, NULL }; static struct attribute *dme1737_fan3_attr[] = { &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan3_alarm.dev_attr.attr, &sensor_dev_attr_fan3_type.dev_attr.attr, NULL }; static struct attribute *dme1737_fan4_attr[] = { &sensor_dev_attr_fan4_input.dev_attr.attr, &sensor_dev_attr_fan4_min.dev_attr.attr, &sensor_dev_attr_fan4_alarm.dev_attr.attr, &sensor_dev_attr_fan4_type.dev_attr.attr, NULL }; static struct attribute *dme1737_fan5_attr[] = { &sensor_dev_attr_fan5_input.dev_attr.attr, &sensor_dev_attr_fan5_min.dev_attr.attr, &sensor_dev_attr_fan5_alarm.dev_attr.attr, &sensor_dev_attr_fan5_max.dev_attr.attr, NULL }; static struct attribute *dme1737_fan6_attr[] = { &sensor_dev_attr_fan6_input.dev_attr.attr, &sensor_dev_attr_fan6_min.dev_attr.attr, &sensor_dev_attr_fan6_alarm.dev_attr.attr, &sensor_dev_attr_fan6_max.dev_attr.attr, NULL }; static const struct attribute_group dme1737_fan_group[] = { { .attrs = dme1737_fan1_attr }, { .attrs = dme1737_fan2_attr }, { .attrs = dme1737_fan3_attr }, { .attrs = dme1737_fan4_attr }, { .attrs = dme1737_fan5_attr }, { .attrs = dme1737_fan6_attr }, }; /* The permissions of the following zone attributes are changed to read- * writeable if the chip is *not* locked. Otherwise they stay read-only. 
*/ static struct attribute *dme1737_zone_chmod_attr[] = { &sensor_dev_attr_zone1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone1_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, NULL }; static const struct attribute_group dme1737_zone_chmod_group = { .attrs = dme1737_zone_chmod_attr, }; /* The permissions of the following PWM attributes are changed to read- * writeable if the chip is *not* locked and the respective PWM is available. * Otherwise they stay read-only. */ static struct attribute *dme1737_pwm1_chmod_attr[] = { &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm1_ramp_rate.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels_zone.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm2_chmod_attr[] = { &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm2_ramp_rate.dev_attr.attr, &sensor_dev_attr_pwm2_auto_channels_zone.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm3_chmod_attr[] = { &sensor_dev_attr_pwm3_freq.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, &sensor_dev_attr_pwm3_ramp_rate.dev_attr.attr, &sensor_dev_attr_pwm3_auto_channels_zone.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm5_chmod_attr[] = { &sensor_dev_attr_pwm5.dev_attr.attr, &sensor_dev_attr_pwm5_freq.dev_attr.attr, NULL }; static struct attribute *dme1737_pwm6_chmod_attr[] = { &sensor_dev_attr_pwm6.dev_attr.attr, 
&sensor_dev_attr_pwm6_freq.dev_attr.attr, NULL }; static const struct attribute_group dme1737_pwm_chmod_group[] = { { .attrs = dme1737_pwm1_chmod_attr }, { .attrs = dme1737_pwm2_chmod_attr }, { .attrs = dme1737_pwm3_chmod_attr }, { .attrs = NULL }, { .attrs = dme1737_pwm5_chmod_attr }, { .attrs = dme1737_pwm6_chmod_attr }, }; /* Pwm[1-3] are read-writeable if the associated pwm is in manual mode and the * chip is not locked. Otherwise they are read-only. */ static struct attribute *dme1737_pwm_chmod_attr[] = { &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm3.dev_attr.attr, }; /* --------------------------------------------------------------------- * Super-IO functions * --------------------------------------------------------------------- */ static inline void dme1737_sio_enter(int sio_cip) { outb(0x55, sio_cip); } static inline void dme1737_sio_exit(int sio_cip) { outb(0xaa, sio_cip); } static inline int dme1737_sio_inb(int sio_cip, int reg) { outb(reg, sio_cip); return inb(sio_cip + 1); } static inline void dme1737_sio_outb(int sio_cip, int reg, int val) { outb(reg, sio_cip); outb(val, sio_cip + 1); } /* --------------------------------------------------------------------- * Device initialization * --------------------------------------------------------------------- */ static int dme1737_i2c_get_features(int, struct dme1737_data*); static void dme1737_chmod_file(struct device *dev, struct attribute *attr, mode_t mode) { if (sysfs_chmod_file(&dev->kobj, attr, mode)) { dev_warn(dev, "Failed to change permissions of %s.\n", attr->name); } } static void dme1737_chmod_group(struct device *dev, const struct attribute_group *group, mode_t mode) { struct attribute **attr; for (attr = group->attrs; *attr; attr++) { dme1737_chmod_file(dev, *attr, mode); } } static void dme1737_remove_files(struct device *dev) { struct dme1737_data *data = dev_get_drvdata(dev); int ix; for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { 
if (data->has_fan & (1 << ix)) { sysfs_remove_group(&dev->kobj, &dme1737_fan_group[ix]); } } for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { if (data->has_pwm & (1 << ix)) { sysfs_remove_group(&dev->kobj, &dme1737_pwm_group[ix]); if (data->type != sch5027 && ix < 3) { sysfs_remove_file(&dev->kobj, dme1737_pwm_misc_attr[ix]); } } } if (data->type != sch5027) { sysfs_remove_group(&dev->kobj, &dme1737_misc_group); } if (data->type == dme1737) { sysfs_remove_group(&dev->kobj, &dme1737_vid_group); } sysfs_remove_group(&dev->kobj, &dme1737_group); if (!data->client) { sysfs_remove_file(&dev->kobj, &dev_attr_name.attr); } } static int dme1737_create_files(struct device *dev) { struct dme1737_data *data = dev_get_drvdata(dev); int err, ix; /* Create a name attribute for ISA devices */ if (!data->client && (err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr))) { goto exit; } /* Create standard sysfs attributes */ if ((err = sysfs_create_group(&dev->kobj, &dme1737_group))) { goto exit_remove; } /* Create misc sysfs attributes */ if ((data->type != sch5027) && (err = sysfs_create_group(&dev->kobj, &dme1737_misc_group))) { goto exit_remove; } /* Create VID-related sysfs attributes */ if ((data->type == dme1737) && (err = sysfs_create_group(&dev->kobj, &dme1737_vid_group))) { goto exit_remove; } /* Create fan sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { if (data->has_fan & (1 << ix)) { if ((err = sysfs_create_group(&dev->kobj, &dme1737_fan_group[ix]))) { goto exit_remove; } } } /* Create PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { if (data->has_pwm & (1 << ix)) { if ((err = sysfs_create_group(&dev->kobj, &dme1737_pwm_group[ix]))) { goto exit_remove; } if (data->type != sch5027 && ix < 3 && (err = sysfs_create_file(&dev->kobj, dme1737_pwm_misc_attr[ix]))) { goto exit_remove; } } } /* Inform if the device is locked. 
Otherwise change the permissions of * selected attributes from read-only to read-writeable. */ if (data->config & 0x02) { dev_info(dev, "Device is locked. Some attributes " "will be read-only.\n"); } else { /* Change permissions of zone sysfs attributes */ dme1737_chmod_group(dev, &dme1737_zone_chmod_group, S_IRUGO | S_IWUSR); /* Change permissions of misc sysfs attributes */ if (data->type != sch5027) { dme1737_chmod_group(dev, &dme1737_misc_group, S_IRUGO | S_IWUSR); } /* Change permissions of PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) { if (data->has_pwm & (1 << ix)) { dme1737_chmod_group(dev, &dme1737_pwm_chmod_group[ix], S_IRUGO | S_IWUSR); if (data->type != sch5027 && ix < 3) { dme1737_chmod_file(dev, dme1737_pwm_misc_attr[ix], S_IRUGO | S_IWUSR); } } } /* Change permissions of pwm[1-3] if in manual mode */ for (ix = 0; ix < 3; ix++) { if ((data->has_pwm & (1 << ix)) && (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) { dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix], S_IRUGO | S_IWUSR); } } } return 0; exit_remove: dme1737_remove_files(dev); exit: return err; } static int dme1737_init_device(struct device *dev) { struct dme1737_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int ix; u8 reg; /* Point to the right nominal voltages array */ data->in_nominal = IN_NOMINAL(data->type); data->config = dme1737_read(data, DME1737_REG_CONFIG); /* Inform if part is not monitoring/started */ if (!(data->config & 0x01)) { if (!force_start) { dev_err(dev, "Device is not monitoring. 
" "Use the force_start load parameter to " "override.\n"); return -EFAULT; } /* Force monitoring */ data->config |= 0x01; dme1737_write(data, DME1737_REG_CONFIG, data->config); } /* Inform if part is not ready */ if (!(data->config & 0x04)) { dev_err(dev, "Device is not ready.\n"); return -EFAULT; } /* Determine which optional fan and pwm features are enabled/present */ if (client) { /* I2C chip */ data->config2 = dme1737_read(data, DME1737_REG_CONFIG2); /* Check if optional fan3 input is enabled */ if (data->config2 & 0x04) { data->has_fan |= (1 << 2); } /* Fan4 and pwm3 are only available if the client's I2C address * is the default 0x2e. Otherwise the I/Os associated with * these functions are used for addr enable/select. */ if (client->addr == 0x2e) { data->has_fan |= (1 << 3); data->has_pwm |= (1 << 2); } /* Determine which of the optional fan[5-6] and pwm[5-6] * features are enabled. For this, we need to query the runtime * registers through the Super-IO LPC interface. Try both * config ports 0x2e and 0x4e. */ if (dme1737_i2c_get_features(0x2e, data) && dme1737_i2c_get_features(0x4e, data)) { dev_warn(dev, "Failed to query Super-IO for optional " "features.\n"); } } else { /* ISA chip */ /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6] * don't exist in the ISA chip. */ data->has_fan |= (1 << 2); data->has_pwm |= (1 << 2); } /* Fan1, fan2, pwm1, and pwm2 are always present */ data->has_fan |= 0x03; data->has_pwm |= 0x03; dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, " "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n", (data->has_pwm & (1 << 2)) ? "yes" : "no", (data->has_pwm & (1 << 4)) ? "yes" : "no", (data->has_pwm & (1 << 5)) ? "yes" : "no", (data->has_fan & (1 << 2)) ? "yes" : "no", (data->has_fan & (1 << 3)) ? "yes" : "no", (data->has_fan & (1 << 4)) ? "yes" : "no", (data->has_fan & (1 << 5)) ? 
"yes" : "no"); reg = dme1737_read(data, DME1737_REG_TACH_PWM); /* Inform if fan-to-pwm mapping differs from the default */ if (client && reg != 0xa4) { /* I2C chip */ dev_warn(dev, "Non-standard fan to pwm mapping: " "fan1->pwm%d, fan2->pwm%d, fan3->pwm%d, " "fan4->pwm%d. Please report to the driver " "maintainer.\n", (reg & 0x03) + 1, ((reg >> 2) & 0x03) + 1, ((reg >> 4) & 0x03) + 1, ((reg >> 6) & 0x03) + 1); } else if (!client && reg != 0x24) { /* ISA chip */ dev_warn(dev, "Non-standard fan to pwm mapping: " "fan1->pwm%d, fan2->pwm%d, fan3->pwm%d. " "Please report to the driver maintainer.\n", (reg & 0x03) + 1, ((reg >> 2) & 0x03) + 1, ((reg >> 4) & 0x03) + 1); } /* Switch pwm[1-3] to manual mode if they are currently disabled and * set the duty-cycles to 0% (which is identical to the PWMs being * disabled). */ if (!(data->config & 0x02)) { for (ix = 0; ix < 3; ix++) { data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); if ((data->has_pwm & (1 << ix)) && (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) { dev_info(dev, "Switching pwm%d to " "manual mode.\n", ix + 1); data->pwm_config[ix] = PWM_EN_TO_REG(1, data->pwm_config[ix]); dme1737_write(data, DME1737_REG_PWM(ix), 0); dme1737_write(data, DME1737_REG_PWM_CONFIG(ix), data->pwm_config[ix]); } } } /* Initialize the default PWM auto channels zone (acz) assignments */ data->pwm_acz[0] = 1; /* pwm1 -> zone1 */ data->pwm_acz[1] = 2; /* pwm2 -> zone2 */ data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ /* Set VRM */ if (data->type == dme1737) { data->vrm = vid_which_vrm(); } return 0; } /* --------------------------------------------------------------------- * I2C device detection and registration * --------------------------------------------------------------------- */ static struct i2c_driver dme1737_i2c_driver; static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data) { int err = 0, reg; u16 addr; dme1737_sio_enter(sio_cip); /* Check device ID * The DME1737 can return either 0x78 or 0x77 
as its device ID. * The SCH5027 returns 0x89 as its device ID. */ reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) { err = -ENODEV; goto exit; } /* Select logical device A (runtime registers) */ dme1737_sio_outb(sio_cip, 0x07, 0x0a); /* Get the base address of the runtime registers */ if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) | dme1737_sio_inb(sio_cip, 0x61))) { err = -ENODEV; goto exit; } /* Read the runtime registers to determine which optional features * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set * to '10' if the respective feature is enabled. */ if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */ data->has_fan |= (1 << 5); } if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */ data->has_pwm |= (1 << 5); } if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */ data->has_fan |= (1 << 4); } if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */ data->has_pwm |= (1 << 4); } exit: dme1737_sio_exit(sio_cip); return err; } /* Return 0 if detection is successful, -ENODEV otherwise */ static int dme1737_i2c_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; struct device *dev = &adapter->dev; u8 company, verstep = 0; const char *name; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { return -ENODEV; } /* A negative kind means that the driver was loaded with no force * parameter (default), so we must identify the chip. 
*/ if (kind < 0) { company = i2c_smbus_read_byte_data(client, DME1737_REG_COMPANY); verstep = i2c_smbus_read_byte_data(client, DME1737_REG_VERSTEP); if (company == DME1737_COMPANY_SMSC && (verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) { kind = dme1737; } else if (company == DME1737_COMPANY_SMSC && verstep == SCH5027_VERSTEP) { kind = sch5027; } else { return -ENODEV; } } if (kind == sch5027) { name = "sch5027"; } else { kind = dme1737; name = "dme1737"; } dev_info(dev, "Found a %s chip at 0x%02x (rev 0x%02x).\n", kind == sch5027 ? "SCH5027" : "DME1737", client->addr, verstep); strlcpy(info->type, name, I2C_NAME_SIZE); return 0; } static int dme1737_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct dme1737_data *data; struct device *dev = &client->dev; int err; data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); data->type = id->driver_data; data->client = client; data->name = client->name; mutex_init(&data->update_lock); /* Initialize the DME1737 chip */ if ((err = dme1737_init_device(dev))) { dev_err(dev, "Failed to initialize device.\n"); goto exit_kfree; } /* Create sysfs files */ if ((err = dme1737_create_files(dev))) { dev_err(dev, "Failed to create sysfs files.\n"); goto exit_kfree; } /* Register device */ data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { dev_err(dev, "Failed to register device.\n"); err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: dme1737_remove_files(dev); exit_kfree: kfree(data); exit: return err; } static int dme1737_i2c_remove(struct i2c_client *client) { struct dme1737_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); dme1737_remove_files(&client->dev); kfree(data); return 0; } static const struct i2c_device_id dme1737_id[] = { { "dme1737", dme1737 }, { "sch5027", sch5027 }, { } }; MODULE_DEVICE_TABLE(i2c, dme1737_id); static struct 
i2c_driver dme1737_i2c_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "dme1737", }, .probe = dme1737_i2c_probe, .remove = dme1737_i2c_remove, .id_table = dme1737_id, .detect = dme1737_i2c_detect, .address_data = &addr_data, }; /* --------------------------------------------------------------------- * ISA device detection and registration * --------------------------------------------------------------------- */ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr) { int err = 0, reg; unsigned short base_addr; dme1737_sio_enter(sio_cip); /* Check device ID * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and * SCH3116 (0x7f). */ reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) { err = -ENODEV; goto exit; } /* Select logical device A (runtime registers) */ dme1737_sio_outb(sio_cip, 0x07, 0x0a); /* Get the base address of the runtime registers */ if (!(base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) | dme1737_sio_inb(sio_cip, 0x61))) { printk(KERN_ERR "dme1737: Base address not set.\n"); err = -ENODEV; goto exit; } /* Access to the hwmon registers is through an index/data register * pair located at offset 0x70/0x71. 
 */
	*addr = base_addr + 0x70;

exit:
	dme1737_sio_exit(sio_cip);
	return err;
}

/*
 * Register a platform device for the detected ISA chip at the given base
 * address. Sets the file-global 'pdev' as a side effect; on failure the
 * partially created device is released and 'pdev' is reset to NULL.
 */
static int __init dme1737_isa_device_add(unsigned short addr)
{
	struct resource res = {
		.start	= addr,
		.end	= addr + DME1737_EXTENT - 1,
		.name	= "dme1737",
		.flags	= IORESOURCE_IO,
	};
	int err;

	/* Bail out early if ACPI already claims this I/O region */
	err = acpi_check_resource_conflict(&res);
	if (err)
		goto exit;

	if (!(pdev = platform_device_alloc("dme1737", addr))) {
		printk(KERN_ERR "dme1737: Failed to allocate device.\n");
		err = -ENOMEM;
		goto exit;
	}

	if ((err = platform_device_add_resources(pdev, &res, 1))) {
		printk(KERN_ERR "dme1737: Failed to add device resource "
		       "(err = %d).\n", err);
		goto exit_device_put;
	}

	if ((err = platform_device_add(pdev))) {
		printk(KERN_ERR "dme1737: Failed to add device (err = %d).\n",
		       err);
		goto exit_device_put;
	}

	return 0;

exit_device_put:
	platform_device_put(pdev);
	pdev = NULL;
exit:
	return err;
}

/*
 * Probe routine for the ISA (SCH311x) variant: claims the I/O region,
 * allocates per-device state, verifies the chip ID (unless the force_id
 * module parameter is set), then initializes the chip and creates the
 * sysfs files.
 * NOTE(review): the platform_get_resource() result is used without a NULL
 * check; this relies on dme1737_isa_device_add() always attaching the
 * resource — confirm if this probe can run for any other device.
 */
static int __devinit dme1737_isa_probe(struct platform_device *pdev)
{
	u8 company, device;
	struct resource *res;
	struct dme1737_data *data;
	struct device *dev = &pdev->dev;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!request_region(res->start, DME1737_EXTENT, "dme1737")) {
		dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
			(unsigned short)res->start,
			(unsigned short)res->start + DME1737_EXTENT - 1);
		err = -EBUSY;
		goto exit;
	}

	if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
		err = -ENOMEM;
		goto exit_release_region;
	}

	data->addr = res->start;
	platform_set_drvdata(pdev, data);

	/* Skip chip detection if module is loaded with force_id parameter */
	if (!force_id) {
		company = dme1737_read(data, DME1737_REG_COMPANY);
		device = dme1737_read(data, DME1737_REG_DEVICE);

		if (!((company == DME1737_COMPANY_SMSC) &&
		      (device == SCH311X_DEVICE))) {
			err = -ENODEV;
			goto exit_kfree;
		}
	}
	data->type = sch311x;

	/* Fill in the remaining client fields and initialize the mutex */
	data->name = "sch311x";
	mutex_init(&data->update_lock);

	dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr);

	/*
Initialize the chip */ if ((err = dme1737_init_device(dev))) { dev_err(dev, "Failed to initialize device.\n"); goto exit_kfree; } /* Create sysfs files */ if ((err = dme1737_create_files(dev))) { dev_err(dev, "Failed to create sysfs files.\n"); goto exit_kfree; } /* Register device */ data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { dev_err(dev, "Failed to register device.\n"); err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: dme1737_remove_files(dev); exit_kfree: platform_set_drvdata(pdev, NULL); kfree(data); exit_release_region: release_region(res->start, DME1737_EXTENT); exit: return err; } static int __devexit dme1737_isa_remove(struct platform_device *pdev) { struct dme1737_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); dme1737_remove_files(&pdev->dev); release_region(data->addr, DME1737_EXTENT); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static struct platform_driver dme1737_isa_driver = { .driver = { .owner = THIS_MODULE, .name = "dme1737", }, .probe = dme1737_isa_probe, .remove = __devexit_p(dme1737_isa_remove), }; /* --------------------------------------------------------------------- * Module initialization and cleanup * --------------------------------------------------------------------- */ static int __init dme1737_init(void) { int err; unsigned short addr; if ((err = i2c_add_driver(&dme1737_i2c_driver))) { goto exit; } if (dme1737_isa_detect(0x2e, &addr) && dme1737_isa_detect(0x4e, &addr) && (!probe_all_addr || (dme1737_isa_detect(0x162e, &addr) && dme1737_isa_detect(0x164e, &addr)))) { /* Return 0 if we didn't find an ISA device */ return 0; } if ((err = platform_driver_register(&dme1737_isa_driver))) { goto exit_del_i2c_driver; } /* Sets global pdev as a side effect */ if ((err = dme1737_isa_device_add(addr))) { goto exit_del_isa_driver; } return 0; exit_del_isa_driver: platform_driver_unregister(&dme1737_isa_driver); 
exit_del_i2c_driver:
	i2c_del_driver(&dme1737_i2c_driver);
exit:
	return err;
}

/*
 * Module cleanup: tear down in reverse order of dme1737_init(). The ISA
 * platform device/driver pair only exists if the global 'pdev' was set
 * during init, so it is unregistered conditionally; the I2C driver is
 * always registered and always removed.
 */
static void __exit dme1737_exit(void)
{
	if (pdev) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&dme1737_isa_driver);
	}

	i2c_del_driver(&dme1737_i2c_driver);
}

MODULE_AUTHOR("Juerg Haefliger <juergh@gmail.com>");
MODULE_DESCRIPTION("DME1737 sensors");
MODULE_LICENSE("GPL");

module_init(dme1737_init);
module_exit(dme1737_exit);
gpl-2.0
sembre/kernel_totoro_update3
common/drivers/media/video/tlg2300/pd-main.c
761
12384
/* * device driver for Telegent tlg2300 based TV cards * * Author : * Kang Yong <kangyong@telegent.com> * Zhang Xiaobing <xbzhang@telegent.com> * Huang Shijie <zyziii@telegent.com> or <shijie8@gmail.com> * * (c) 2009 Telegent Systems * (c) 2010 Telegent Systems * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/suspend.h> #include <linux/usb/quirks.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/types.h> #include <linux/firmware.h> #include <linux/smp_lock.h> #include "vendorcmds.h" #include "pd-common.h" #define VENDOR_ID 0x1B24 #define PRODUCT_ID 0x4001 static struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, PRODUCT_ID, 255, 1, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, PRODUCT_ID, 255, 1, 1) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); int debug_mode; module_param(debug_mode, int, 0644); MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose"); static const char *firmware_name = "tlg2300_firmware.bin"; static struct usb_driver poseidon_driver; static LIST_HEAD(pd_device_list); /* * send set request to USB firmware. 
*/ s32 send_set_req(struct poseidon *pd, u8 cmdid, s32 param, s32 *cmd_status) { s32 ret; s8 data[32] = {}; u16 lower_16, upper_16; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; mdelay(30); if (param == 0) { upper_16 = lower_16 = 0; } else { /* send 32 bit param as two 16 bit param,little endian */ lower_16 = (unsigned short)(param & 0xffff); upper_16 = (unsigned short)((param >> 16) & 0xffff); } ret = usb_control_msg(pd->udev, usb_rcvctrlpipe(pd->udev, 0), REQ_SET_CMD | cmdid, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, lower_16, upper_16, &data, sizeof(*cmd_status), USB_CTRL_GET_TIMEOUT); if (!ret) { return -ENXIO; } else { /* 1st 4 bytes into cmd_status */ memcpy((char *)cmd_status, &(data[0]), sizeof(*cmd_status)); } return 0; } /* * send get request to Poseidon firmware. */ s32 send_get_req(struct poseidon *pd, u8 cmdid, s32 param, void *buf, s32 *cmd_status, s32 datalen) { s32 ret; s8 data[128] = {}; u16 lower_16, upper_16; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; mdelay(30); if (param == 0) { upper_16 = lower_16 = 0; } else { /*send 32 bit param as two 16 bit param, little endian */ lower_16 = (unsigned short)(param & 0xffff); upper_16 = (unsigned short)((param >> 16) & 0xffff); } ret = usb_control_msg(pd->udev, usb_rcvctrlpipe(pd->udev, 0), REQ_GET_CMD | cmdid, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, lower_16, upper_16, &data, (datalen + sizeof(*cmd_status)), USB_CTRL_GET_TIMEOUT); if (ret < 0) { return -ENXIO; } else { /* 1st 4 bytes into cmd_status, remaining data into cmd_data */ memcpy((char *)cmd_status, &data[0], sizeof(*cmd_status)); memcpy((char *)buf, &data[sizeof(*cmd_status)], datalen); } return 0; } static int pm_notifier_block(struct notifier_block *nb, unsigned long event, void *dummy) { struct poseidon *pd = NULL; struct list_head *node, *next; switch (event) { case PM_POST_HIBERNATION: list_for_each_safe(node, next, &pd_device_list) { struct usb_device *udev; struct usb_interface *iface; int rc = 
0; pd = container_of(node, struct poseidon, device_list); udev = pd->udev; iface = pd->interface; /* It will cause the system to reload the firmware */ rc = usb_lock_device_for_reset(udev, iface); if (rc >= 0) { usb_reset_device(udev); usb_unlock_device(udev); } } break; default: break; } log("event :%ld\n", event); return 0; } static struct notifier_block pm_notifer = { .notifier_call = pm_notifier_block, }; int set_tuner_mode(struct poseidon *pd, unsigned char mode) { s32 ret, cmd_status; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; ret = send_set_req(pd, TUNE_MODE_SELECT, mode, &cmd_status); if (ret || cmd_status) return -ENXIO; return 0; } void poseidon_delete(struct kref *kref) { struct poseidon *pd = container_of(kref, struct poseidon, kref); if (!pd) return; list_del_init(&pd->device_list); pd_dvb_usb_device_cleanup(pd); /* clean_audio_data(&pd->audio_data);*/ if (pd->udev) { usb_put_dev(pd->udev); pd->udev = NULL; } if (pd->interface) { usb_put_intf(pd->interface); pd->interface = NULL; } kfree(pd); log(); } static int firmware_download(struct usb_device *udev) { int ret = 0, actual_length; const struct firmware *fw = NULL; void *fwbuf = NULL; size_t fwlength = 0, offset; size_t max_packet_size; ret = request_firmware(&fw, firmware_name, &udev->dev); if (ret) { log("download err : %d", ret); return ret; } fwlength = fw->size; fwbuf = kzalloc(fwlength, GFP_KERNEL); if (!fwbuf) { ret = -ENOMEM; goto out; } memcpy(fwbuf, fw->data, fwlength); max_packet_size = udev->ep_out[0x1]->desc.wMaxPacketSize; log("\t\t download size : %d", (int)max_packet_size); for (offset = 0; offset < fwlength; offset += max_packet_size) { actual_length = 0; ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), /* ep 1 */ fwbuf + offset, min(max_packet_size, fwlength - offset), &actual_length, HZ * 10); if (ret) break; } kfree(fwbuf); out: release_firmware(fw); return ret; } static inline struct poseidon *get_pd(struct usb_interface *intf) { return 
usb_get_intfdata(intf); } #ifdef CONFIG_PM /* one-to-one map : poseidon{} <----> usb_device{}'s port */ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev) { pd->portnum = udev->portnum; } static inline int get_autopm_ref(struct poseidon *pd) { return pd->video_data.users + pd->vbi_data.users + pd->audio.users + atomic_read(&pd->dvb_data.users) + pd->radio_data.users; } /* fixup something for poseidon */ static inline struct poseidon *fixup(struct poseidon *pd) { int count; /* old udev and interface have gone, so put back reference . */ count = get_autopm_ref(pd); log("count : %d, ref count : %d", count, get_pm_count(pd)); while (count--) usb_autopm_put_interface(pd->interface); /*usb_autopm_set_interface(pd->interface); */ usb_put_dev(pd->udev); usb_put_intf(pd->interface); log("event : %d\n", pd->msg.event); return pd; } static struct poseidon *find_old_poseidon(struct usb_device *udev) { struct poseidon *pd; list_for_each_entry(pd, &pd_device_list, device_list) { if (pd->portnum == udev->portnum && in_hibernation(pd)) return fixup(pd); } return NULL; } /* Is the card working now ? */ static inline int is_working(struct poseidon *pd) { return get_pm_count(pd) > 0; } static int poseidon_suspend(struct usb_interface *intf, pm_message_t msg) { struct poseidon *pd = get_pd(intf); if (!pd) return 0; if (!is_working(pd)) { if (get_pm_count(pd) <= 0 && !in_hibernation(pd)) { pd->msg.event = PM_EVENT_AUTO_SUSPEND; pd->pm_resume = NULL; /* a good guard */ printk(KERN_DEBUG "\n\t+ TLG2300 auto suspend +\n\n"); } return 0; } pd->msg = msg; /* save it here */ logpm(pd); return pd->pm_suspend ? 
pd->pm_suspend(pd) : 0; } static int poseidon_resume(struct usb_interface *intf) { struct poseidon *pd = get_pd(intf); if (!pd) return 0; printk(KERN_DEBUG "\n\t ++ TLG2300 resume ++\n\n"); if (!is_working(pd)) { if (PM_EVENT_AUTO_SUSPEND == pd->msg.event) pd->msg = PMSG_ON; return 0; } if (in_hibernation(pd)) { logpm(pd); return 0; } logpm(pd); return pd->pm_resume ? pd->pm_resume(pd) : 0; } static void hibernation_resume(struct work_struct *w) { struct poseidon *pd = container_of(w, struct poseidon, pm_work); int count; pd->msg.event = 0; /* clear it here */ pd->state &= ~POSEIDON_STATE_DISCONNECT; /* set the new interface's reference */ count = get_autopm_ref(pd); while (count--) usb_autopm_get_interface(pd->interface); /* resume the context */ logpm(pd); if (pd->pm_resume) pd->pm_resume(pd); } #else /* CONFIG_PM is not enabled: */ static inline struct poseidon *find_old_poseidon(struct usb_device *udev) { return NULL; } static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev) { } #endif static bool check_firmware(struct usb_device *udev, int *down_firmware) { void *buf; int ret; struct cmd_firmware_vers_s *cmd_firm; buf = kzalloc(sizeof(*cmd_firm) + sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQ_GET_CMD | GET_FW_ID, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, sizeof(*cmd_firm) + sizeof(u32), USB_CTRL_GET_TIMEOUT); kfree(buf); if (ret < 0) { *down_firmware = 1; return firmware_download(udev); } return ret; } static int poseidon_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct poseidon *pd = NULL; int ret = 0; int new_one = 0; /* download firmware */ check_firmware(udev, &ret); if (ret) return 0; /* Do I recovery from the hibernate ? 
*/ pd = find_old_poseidon(udev); if (!pd) { pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; kref_init(&pd->kref); set_map_flags(pd, udev); new_one = 1; } pd->udev = usb_get_dev(udev); pd->interface = usb_get_intf(interface); usb_set_intfdata(interface, pd); if (new_one) { struct device *dev = &interface->dev; logpm(pd); mutex_init(&pd->lock); /* register v4l2 device */ snprintf(pd->v4l2_dev.name, sizeof(pd->v4l2_dev.name), "%s %s", dev->driver->name, dev_name(dev)); ret = v4l2_device_register(NULL, &pd->v4l2_dev); /* register devices in directory /dev */ ret = pd_video_init(pd); poseidon_audio_init(pd); poseidon_fm_init(pd); pd_dvb_usb_device_init(pd); INIT_LIST_HEAD(&pd->device_list); list_add_tail(&pd->device_list, &pd_device_list); } device_init_wakeup(&udev->dev, 1); #ifdef CONFIG_PM pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY; usb_enable_autosuspend(pd->udev); if (in_hibernation(pd)) { INIT_WORK(&pd->pm_work, hibernation_resume); schedule_work(&pd->pm_work); } #endif return 0; } static void poseidon_disconnect(struct usb_interface *interface) { struct poseidon *pd = get_pd(interface); if (!pd) return; logpm(pd); if (in_hibernation(pd)) return; mutex_lock(&pd->lock); pd->state |= POSEIDON_STATE_DISCONNECT; mutex_unlock(&pd->lock); /* stop urb transferring */ stop_all_video_stream(pd); dvb_stop_streaming(&pd->dvb_data); /*unregister v4l2 device */ v4l2_device_unregister(&pd->v4l2_dev); lock_kernel(); { pd_dvb_usb_device_exit(pd); poseidon_fm_exit(pd); poseidon_audio_free(pd); pd_video_exit(pd); } unlock_kernel(); usb_set_intfdata(interface, NULL); kref_put(&pd->kref, poseidon_delete); } static struct usb_driver poseidon_driver = { .name = "poseidon", .probe = poseidon_probe, .disconnect = poseidon_disconnect, .id_table = id_table, #ifdef CONFIG_PM .suspend = poseidon_suspend, .resume = poseidon_resume, #endif .supports_autosuspend = 1, }; static int __init poseidon_init(void) { int ret; ret = usb_register(&poseidon_driver); if (ret) 
return ret; register_pm_notifier(&pm_notifer); return ret; } static void __exit poseidon_exit(void) { log(); unregister_pm_notifier(&pm_notifer); usb_deregister(&poseidon_driver); } module_init(poseidon_init); module_exit(poseidon_exit); MODULE_AUTHOR("Telegent Systems"); MODULE_DESCRIPTION("For tlg2300-based USB device "); MODULE_LICENSE("GPL");
gpl-2.0
raisul2010/samurai-kernel
drivers/staging/comedi/drivers/cb_pcidas64.c
761
124903
/* comedi/drivers/cb_pcidas64.c This is a driver for the ComputerBoards/MeasurementComputing PCI-DAS 64xx, 60xx, and 4020 cards. Author: Frank Mori Hess <fmhess@users.sourceforge.net> Copyright (C) 2001, 2002 Frank Mori Hess Thanks also go to the following people: Steve Rosenbluth, for providing the source code for his pci-das6402 driver, and source code for working QNX pci-6402 drivers by Greg Laird and Mariusz Bogacz. None of the code was used directly here, but it was useful as an additional source of documentation on how to program the boards. John Sims, for much testing and feedback on pcidas-4020 support. COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-8 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
************************************************************************/ /* Driver: cb_pcidas64 Description: MeasurementComputing PCI-DAS64xx, 60XX, and 4020 series with the PLX 9080 PCI controller Author: Frank Mori Hess <fmhess@users.sourceforge.net> Status: works Updated: 2002-10-09 Devices: [Measurement Computing] PCI-DAS6402/16 (cb_pcidas64), PCI-DAS6402/12, PCI-DAS64/M1/16, PCI-DAS64/M2/16, PCI-DAS64/M3/16, PCI-DAS6402/16/JR, PCI-DAS64/M1/16/JR, PCI-DAS64/M2/16/JR, PCI-DAS64/M3/16/JR, PCI-DAS64/M1/14, PCI-DAS64/M2/14, PCI-DAS64/M3/14, PCI-DAS6013, PCI-DAS6014, PCI-DAS6023, PCI-DAS6025, PCI-DAS6030, PCI-DAS6031, PCI-DAS6032, PCI-DAS6033, PCI-DAS6034, PCI-DAS6035, PCI-DAS6036, PCI-DAS6040, PCI-DAS6052, PCI-DAS6070, PCI-DAS6071, PCI-DAS4020/12 Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) These boards may be autocalibrated with the comedi_calibrate utility. To select the bnc trigger input on the 4020 (instead of the dio input), specify a nonzero channel in the chanspec. If you wish to use an external master clock on the 4020, you may do so by setting the scan_begin_src to TRIG_OTHER, and using an INSN_CONFIG_TIMER_1 configuration insn to configure the divisor to use for the external clock. Some devices are not identified because the PCI device IDs are not yet known. If you have such a board, please file a bug report at https://bugs.comedi.org. */ /* TODO: make it return error if user attempts an ai command that uses the external queue, and an ao command simultaneously user counter subdevice there are a number of boards this driver will support when they are fully released, but does not yet since the pci device id numbers are not yet available. support prescaled 100khz clock for slow pacing (not available on 6000 series?) 
make ao fifo size adjustable like ai fifo */ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include <asm/system.h> #include "comedi_pci.h" #include "8253.h" #include "8255.h" #include "plx9080.h" #include "comedi_fc.h" #undef PCIDAS64_DEBUG /* disable debugging code */ /* #define PCIDAS64_DEBUG enable debugging code */ #ifdef PCIDAS64_DEBUG #define DEBUG_PRINT(format, args...) printk(format , ## args) #else #define DEBUG_PRINT(format, args...) #endif #define TIMER_BASE 25 /* 40MHz master clock */ #define PRESCALED_TIMER_BASE 10000 /* 100kHz 'prescaled' clock for slow aquisition, maybe I'll support this someday */ #define DMA_BUFFER_SIZE 0x1000 #define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307 /* maximum value that can be loaded into board's 24-bit counters*/ static const int max_counter_value = 0xffffff; /* PCI-DAS64xxx base addresses */ /* indices of base address regions */ enum base_address_regions { PLX9080_BADDRINDEX = 0, MAIN_BADDRINDEX = 2, DIO_COUNTER_BADDRINDEX = 3, }; /* priv(dev)->main_iobase registers */ enum write_only_registers { INTR_ENABLE_REG = 0x0, /* interrupt enable register */ HW_CONFIG_REG = 0x2, /* hardware config register */ DAQ_SYNC_REG = 0xc, DAQ_ATRIG_LOW_4020_REG = 0xc, ADC_CONTROL0_REG = 0x10, /* adc control register 0 */ ADC_CONTROL1_REG = 0x12, /* adc control register 1 */ CALIBRATION_REG = 0x14, ADC_SAMPLE_INTERVAL_LOWER_REG = 0x16, /* lower 16 bits of adc sample interval counter */ ADC_SAMPLE_INTERVAL_UPPER_REG = 0x18, /* upper 8 bits of adc sample interval counter */ ADC_DELAY_INTERVAL_LOWER_REG = 0x1a, /* lower 16 bits of delay interval counter */ ADC_DELAY_INTERVAL_UPPER_REG = 0x1c, /* upper 8 bits of delay interval counter */ ADC_COUNT_LOWER_REG = 0x1e, /* lower 16 bits of hardware conversion/scan counter */ ADC_COUNT_UPPER_REG = 0x20, /* upper 8 bits of hardware conversion/scan counter */ ADC_START_REG = 0x22, /* software trigger to start aquisition */ ADC_CONVERT_REG = 0x24, /* initiates single 
conversion */ ADC_QUEUE_CLEAR_REG = 0x26, /* clears adc queue */ ADC_QUEUE_LOAD_REG = 0x28, /* loads adc queue */ ADC_BUFFER_CLEAR_REG = 0x2a, ADC_QUEUE_HIGH_REG = 0x2c, /* high channel for internal queue, use adc_chan_bits() inline above */ DAC_CONTROL0_REG = 0x50, /* dac control register 0 */ DAC_CONTROL1_REG = 0x52, /* dac control register 0 */ DAC_SAMPLE_INTERVAL_LOWER_REG = 0x54, /* lower 16 bits of dac sample interval counter */ DAC_SAMPLE_INTERVAL_UPPER_REG = 0x56, /* upper 8 bits of dac sample interval counter */ DAC_SELECT_REG = 0x60, DAC_START_REG = 0x64, DAC_BUFFER_CLEAR_REG = 0x66, /* clear dac buffer */ }; static inline unsigned int dac_convert_reg(unsigned int channel) { return 0x70 + (2 * (channel & 0x1)); } static inline unsigned int dac_lsb_4020_reg(unsigned int channel) { return 0x70 + (4 * (channel & 0x1)); } static inline unsigned int dac_msb_4020_reg(unsigned int channel) { return 0x72 + (4 * (channel & 0x1)); } enum read_only_registers { HW_STATUS_REG = 0x0, /* hardware status register, reading this apparently clears pending interrupts as well */ PIPE1_READ_REG = 0x4, ADC_READ_PNTR_REG = 0x8, LOWER_XFER_REG = 0x10, ADC_WRITE_PNTR_REG = 0xc, PREPOST_REG = 0x14, }; enum read_write_registers { I8255_4020_REG = 0x48, /* 8255 offset, for 4020 only */ ADC_QUEUE_FIFO_REG = 0x100, /* external channel/gain queue, uses same bits as ADC_QUEUE_LOAD_REG */ ADC_FIFO_REG = 0x200, /* adc data fifo */ DAC_FIFO_REG = 0x300, /* dac data fifo, has weird interactions with external channel queue */ }; /* priv(dev)->dio_counter_iobase registers */ enum dio_counter_registers { DIO_8255_OFFSET = 0x0, DO_REG = 0x20, DI_REG = 0x28, DIO_DIRECTION_60XX_REG = 0x40, DIO_DATA_60XX_REG = 0x48, }; /* bit definitions for write-only registers */ enum intr_enable_contents { ADC_INTR_SRC_MASK = 0x3, /* bits that set adc interrupt source */ ADC_INTR_QFULL_BITS = 0x0, /* interrupt fifo quater full */ ADC_INTR_EOC_BITS = 0x1, /* interrupt end of conversion */ ADC_INTR_EOSCAN_BITS = 
0x2, /* interrupt end of scan */ ADC_INTR_EOSEQ_BITS = 0x3, /* interrupt end of sequence (probably wont use this it's pretty fancy) */ EN_ADC_INTR_SRC_BIT = 0x4, /* enable adc interrupt source */ EN_ADC_DONE_INTR_BIT = 0x8, /* enable adc aquisition done interrupt */ DAC_INTR_SRC_MASK = 0x30, DAC_INTR_QEMPTY_BITS = 0x0, DAC_INTR_HIGH_CHAN_BITS = 0x10, EN_DAC_INTR_SRC_BIT = 0x40, /* enable dac interrupt source */ EN_DAC_DONE_INTR_BIT = 0x80, EN_ADC_ACTIVE_INTR_BIT = 0x200, /* enable adc active interrupt */ EN_ADC_STOP_INTR_BIT = 0x400, /* enable adc stop trigger interrupt */ EN_DAC_ACTIVE_INTR_BIT = 0x800, /* enable dac active interrupt */ EN_DAC_UNDERRUN_BIT = 0x4000, /* enable dac underrun status bit */ EN_ADC_OVERRUN_BIT = 0x8000, /* enable adc overrun status bit */ }; enum hw_config_contents { MASTER_CLOCK_4020_MASK = 0x3, /* bits that specify master clock source for 4020 */ INTERNAL_CLOCK_4020_BITS = 0x1, /* use 40 MHz internal master clock for 4020 */ BNC_CLOCK_4020_BITS = 0x2, /* use BNC input for master clock */ EXT_CLOCK_4020_BITS = 0x3, /* use dio input for master clock */ EXT_QUEUE_BIT = 0x200, /* use external channel/gain queue (more versatile than internal queue) */ SLOW_DAC_BIT = 0x400, /* use 225 nanosec strobe when loading dac instead of 50 nanosec */ HW_CONFIG_DUMMY_BITS = 0x2000, /* bit with unknown function yet given as default value in pci-das64 manual */ DMA_CH_SELECT_BIT = 0x8000, /* bit selects channels 1/0 for analog input/output, otherwise 0/1 */ FIFO_SIZE_REG = 0x4, /* allows adjustment of fifo sizes */ DAC_FIFO_SIZE_MASK = 0xff00, /* bits that set dac fifo size */ DAC_FIFO_BITS = 0xf800, /* 8k sample ao fifo */ }; #define DAC_FIFO_SIZE 0x2000 enum daq_atrig_low_4020_contents { EXT_AGATE_BNC_BIT = 0x8000, /* use trig/ext clk bnc input for analog gate signal */ EXT_STOP_TRIG_BNC_BIT = 0x4000, /* use trig/ext clk bnc input for external stop trigger signal */ EXT_START_TRIG_BNC_BIT = 0x2000, /* use trig/ext clk bnc input for external start 
trigger signal */ }; static inline uint16_t analog_trig_low_threshold_bits(uint16_t threshold) { return threshold & 0xfff; } enum adc_control0_contents { ADC_GATE_SRC_MASK = 0x3, /* bits that select gate */ ADC_SOFT_GATE_BITS = 0x1, /* software gate */ ADC_EXT_GATE_BITS = 0x2, /* external digital gate */ ADC_ANALOG_GATE_BITS = 0x3, /* analog level gate */ ADC_GATE_LEVEL_BIT = 0x4, /* level-sensitive gate (for digital) */ ADC_GATE_POLARITY_BIT = 0x8, /* gate active low */ ADC_START_TRIG_SOFT_BITS = 0x10, ADC_START_TRIG_EXT_BITS = 0x20, ADC_START_TRIG_ANALOG_BITS = 0x30, ADC_START_TRIG_MASK = 0x30, ADC_START_TRIG_FALLING_BIT = 0x40, /* trig 1 uses falling edge */ ADC_EXT_CONV_FALLING_BIT = 0x800, /* external pacing uses falling edge */ ADC_SAMPLE_COUNTER_EN_BIT = 0x1000, /* enable hardware scan counter */ ADC_DMA_DISABLE_BIT = 0x4000, /* disables dma */ ADC_ENABLE_BIT = 0x8000, /* master adc enable */ }; enum adc_control1_contents { ADC_QUEUE_CONFIG_BIT = 0x1, /* should be set for boards with > 16 channels */ CONVERT_POLARITY_BIT = 0x10, EOC_POLARITY_BIT = 0x20, ADC_SW_GATE_BIT = 0x40, /* software gate of adc */ ADC_DITHER_BIT = 0x200, /* turn on extra noise for dithering */ RETRIGGER_BIT = 0x800, ADC_LO_CHANNEL_4020_MASK = 0x300, ADC_HI_CHANNEL_4020_MASK = 0xc00, TWO_CHANNEL_4020_BITS = 0x1000, /* two channel mode for 4020 */ FOUR_CHANNEL_4020_BITS = 0x2000, /* four channel mode for 4020 */ CHANNEL_MODE_4020_MASK = 0x3000, ADC_MODE_MASK = 0xf000, }; static inline uint16_t adc_lo_chan_4020_bits(unsigned int channel) { return (channel & 0x3) << 8; }; static inline uint16_t adc_hi_chan_4020_bits(unsigned int channel) { return (channel & 0x3) << 10; }; static inline uint16_t adc_mode_bits(unsigned int mode) { return (mode & 0xf) << 12; }; enum calibration_contents { SELECT_8800_BIT = 0x1, SELECT_8402_64XX_BIT = 0x2, SELECT_1590_60XX_BIT = 0x2, CAL_EN_64XX_BIT = 0x40, /* calibration enable for 64xx series */ SERIAL_DATA_IN_BIT = 0x80, SERIAL_CLOCK_BIT = 0x100, 
CAL_EN_60XX_BIT = 0x200, /* calibration enable for 60xx series */ CAL_GAIN_BIT = 0x800, }; /* calibration sources for 6025 are: * 0 : ground * 1 : 10V * 2 : 5V * 3 : 0.5V * 4 : 0.05V * 5 : ground * 6 : dac channel 0 * 7 : dac channel 1 */ static inline uint16_t adc_src_bits(unsigned int source) { return (source & 0xf) << 3; }; static inline uint16_t adc_convert_chan_4020_bits(unsigned int channel) { return (channel & 0x3) << 8; }; enum adc_queue_load_contents { UNIP_BIT = 0x800, /* unipolar/bipolar bit */ ADC_SE_DIFF_BIT = 0x1000, /* single-ended/ differential bit */ ADC_COMMON_BIT = 0x2000, /* non-referenced single-ended (common-mode input) */ QUEUE_EOSEQ_BIT = 0x4000, /* queue end of sequence */ QUEUE_EOSCAN_BIT = 0x8000, /* queue end of scan */ }; static inline uint16_t adc_chan_bits(unsigned int channel) { return channel & 0x3f; }; enum dac_control0_contents { DAC_ENABLE_BIT = 0x8000, /* dac controller enable bit */ DAC_CYCLIC_STOP_BIT = 0x4000, DAC_WAVEFORM_MODE_BIT = 0x100, DAC_EXT_UPDATE_FALLING_BIT = 0x80, DAC_EXT_UPDATE_ENABLE_BIT = 0x40, WAVEFORM_TRIG_MASK = 0x30, WAVEFORM_TRIG_DISABLED_BITS = 0x0, WAVEFORM_TRIG_SOFT_BITS = 0x10, WAVEFORM_TRIG_EXT_BITS = 0x20, WAVEFORM_TRIG_ADC1_BITS = 0x30, WAVEFORM_TRIG_FALLING_BIT = 0x8, WAVEFORM_GATE_LEVEL_BIT = 0x4, WAVEFORM_GATE_ENABLE_BIT = 0x2, WAVEFORM_GATE_SELECT_BIT = 0x1, }; enum dac_control1_contents { DAC_WRITE_POLARITY_BIT = 0x800, /* board-dependent setting */ DAC1_EXT_REF_BIT = 0x200, DAC0_EXT_REF_BIT = 0x100, DAC_OUTPUT_ENABLE_BIT = 0x80, /* dac output enable bit */ DAC_UPDATE_POLARITY_BIT = 0x40, /* board-dependent setting */ DAC_SW_GATE_BIT = 0x20, DAC1_UNIPOLAR_BIT = 0x8, DAC0_UNIPOLAR_BIT = 0x2, }; /* bit definitions for read-only registers */ enum hw_status_contents { DAC_UNDERRUN_BIT = 0x1, ADC_OVERRUN_BIT = 0x2, DAC_ACTIVE_BIT = 0x4, ADC_ACTIVE_BIT = 0x8, DAC_INTR_PENDING_BIT = 0x10, ADC_INTR_PENDING_BIT = 0x20, DAC_DONE_BIT = 0x40, ADC_DONE_BIT = 0x80, EXT_INTR_PENDING_BIT = 0x100, ADC_STOP_BIT = 
0x200, }; static inline uint16_t pipe_full_bits(uint16_t hw_status_bits) { return (hw_status_bits >> 10) & 0x3; }; static inline unsigned int dma_chain_flag_bits(uint16_t prepost_bits) { return (prepost_bits >> 6) & 0x3; } static inline unsigned int adc_upper_read_ptr_code(uint16_t prepost_bits) { return (prepost_bits >> 12) & 0x3; } static inline unsigned int adc_upper_write_ptr_code(uint16_t prepost_bits) { return (prepost_bits >> 14) & 0x3; } /* I2C addresses for 4020 */ enum i2c_addresses { RANGE_CAL_I2C_ADDR = 0x20, CALDAC0_I2C_ADDR = 0xc, CALDAC1_I2C_ADDR = 0xd, }; enum range_cal_i2c_contents { ADC_SRC_4020_MASK = 0x70, /* bits that set what source the adc converter measures */ BNC_TRIG_THRESHOLD_0V_BIT = 0x80, /* make bnc trig/ext clock threshold 0V instead of 2.5V */ }; static inline uint8_t adc_src_4020_bits(unsigned int source) { return (source << 4) & ADC_SRC_4020_MASK; }; static inline uint8_t attenuate_bit(unsigned int channel) { /* attenuate channel (+-5V input range) */ return 1 << (channel & 0x3); }; /* analog input ranges for 64xx boards */ static const struct comedi_lrange ai_ranges_64xx = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; /* analog input ranges for 60xx boards */ static const struct comedi_lrange ai_ranges_60xx = { 4, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(0.5), BIP_RANGE(0.05), } }; /* analog input ranges for 6030, etc boards */ static const struct comedi_lrange ai_ranges_6030 = { 14, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2), BIP_RANGE(1), BIP_RANGE(0.5), BIP_RANGE(0.2), BIP_RANGE(0.1), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE(0.5), UNI_RANGE(0.2), UNI_RANGE(0.1), } }; /* analog input ranges for 6052, etc boards */ static const struct comedi_lrange ai_ranges_6052 = { 15, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1), BIP_RANGE(0.5), BIP_RANGE(0.25), BIP_RANGE(0.1), BIP_RANGE(0.05), UNI_RANGE(10), 
UNI_RANGE(5), UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE(0.5), UNI_RANGE(0.2), UNI_RANGE(0.1), } }; /* analog input ranges for 4020 board */ static const struct comedi_lrange ai_ranges_4020 = { 2, { BIP_RANGE(5), BIP_RANGE(1), } }; /* analog output ranges */ static const struct comedi_lrange ao_ranges_64xx = { 4, { BIP_RANGE(5), BIP_RANGE(10), UNI_RANGE(5), UNI_RANGE(10), } }; static const int ao_range_code_64xx[] = { 0x0, 0x1, 0x2, 0x3, }; static const struct comedi_lrange ao_ranges_60xx = { 1, { BIP_RANGE(10), } }; static const int ao_range_code_60xx[] = { 0x0, }; static const struct comedi_lrange ao_ranges_6030 = { 2, { BIP_RANGE(10), UNI_RANGE(10), } }; static const int ao_range_code_6030[] = { 0x0, 0x2, }; static const struct comedi_lrange ao_ranges_4020 = { 2, { BIP_RANGE(5), BIP_RANGE(10), } }; static const int ao_range_code_4020[] = { 0x1, 0x0, }; enum register_layout { LAYOUT_60XX, LAYOUT_64XX, LAYOUT_4020, }; struct hw_fifo_info { unsigned int num_segments; unsigned int max_segment_length; unsigned int sample_packing_ratio; uint16_t fifo_size_reg_mask; }; struct pcidas64_board { const char *name; int device_id; /* pci device id */ int ai_se_chans; /* number of ai inputs in single-ended mode */ int ai_bits; /* analog input resolution */ int ai_speed; /* fastest conversion period in ns */ const struct comedi_lrange *ai_range_table; int ao_nchan; /* number of analog out channels */ int ao_bits; /* analog output resolution */ int ao_scan_speed; /* analog output speed (for a scan, not conversion) */ const struct comedi_lrange *ao_range_table; const int *ao_range_code; const struct hw_fifo_info *const ai_fifo; enum register_layout layout; /* different board families have slightly different registers */ unsigned has_8255:1; }; static const struct hw_fifo_info ai_fifo_4020 = { .num_segments = 2, .max_segment_length = 0x8000, .sample_packing_ratio = 2, .fifo_size_reg_mask = 0x7f, }; static const struct hw_fifo_info ai_fifo_64xx = { .num_segments = 4, 
.max_segment_length = 0x800, .sample_packing_ratio = 1, .fifo_size_reg_mask = 0x3f, }; static const struct hw_fifo_info ai_fifo_60xx = { .num_segments = 4, .max_segment_length = 0x800, .sample_packing_ratio = 1, .fifo_size_reg_mask = 0x7f, }; /* maximum number of dma transfers we will chain together into a ring * (and the maximum number of dma buffers we maintain) */ #define MAX_AI_DMA_RING_COUNT (0x80000 / DMA_BUFFER_SIZE) #define MIN_AI_DMA_RING_COUNT (0x10000 / DMA_BUFFER_SIZE) #define AO_DMA_RING_COUNT (0x10000 / DMA_BUFFER_SIZE) static inline unsigned int ai_dma_ring_count(struct pcidas64_board *board) { if (board->layout == LAYOUT_4020) return MAX_AI_DMA_RING_COUNT; else return MIN_AI_DMA_RING_COUNT; } static const int bytes_in_sample = 2; static const struct pcidas64_board pcidas64_boards[] = { { .name = "pci-das6402/16", .device_id = 0x1d, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das6402/12", /* XXX check */ .device_id = 0x1e, .ai_se_chans = 64, .ai_bits = 12, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m1/16", .device_id = 0x35, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 1000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m2/16", .device_id = 0x36, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 500, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, 
.ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m3/16", .device_id = 0x37, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 333, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das6013", .device_id = 0x78, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .ao_bits = 16, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6014", .device_id = 0x79, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6023", .device_id = 0x5d, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 5000, .ao_nchan = 0, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 1, }, { .name = "pci-das6025", .device_id = 0x5e, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 1, }, { .name = "pci-das6030", .device_id = 0x5f, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ao_range_table = &ao_ranges_6030, .ao_range_code = 
ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6031", .device_id = 0x60, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6032", .device_id = 0x61, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 0, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6033", .device_id = 0x62, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 0, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6034", .device_id = 0x63, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .ao_scan_speed = 0, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6035", .device_id = 0x64, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6036", .device_id = 0x6f, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6040", .device_id = 0x65, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 2000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 1000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = 
&ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6052", .device_id = 0x66, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 3333, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 3333, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6070", .device_id = 0x67, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 800, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 1000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6071", .device_id = 0x68, .ai_se_chans = 64, .ai_bits = 12, .ai_speed = 800, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 1000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das4020/12", .device_id = 0x52, .ai_se_chans = 4, .ai_bits = 12, .ai_speed = 50, .ao_bits = 12, .ao_nchan = 2, .ao_scan_speed = 0, /* no hardware pacing on ao */ .layout = LAYOUT_4020, .ai_range_table = &ai_ranges_4020, .ao_range_table = &ao_ranges_4020, .ao_range_code = ao_range_code_4020, .ai_fifo = &ai_fifo_4020, .has_8255 = 1, }, #if 0 { .name = "pci-das6402/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m1/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 1000, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m2/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 500, .ao_nchan = 0, .ao_scan_speed = 
10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m3/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 333, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m1/14", .device_id = 0, /* XXX */ .ai_se_chans = 64, .ai_bits = 14, .ai_speed = 1000, .ao_nchan = 2, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m2/14", .device_id = 0, /* XXX */ .ai_se_chans = 64, .ai_bits = 14, .ai_speed = 500, .ao_nchan = 2, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m3/14", .device_id = 0, /* XXX */ .ai_se_chans = 64, .ai_bits = 14, .ai_speed = 333, .ao_nchan = 2, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, #endif }; static DEFINE_PCI_DEVICE_TABLE(pcidas64_pci_table) = { { PCI_VENDOR_ID_COMPUTERBOARDS, 0x001d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x001e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0035, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0036, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0037, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x005d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x005e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x005f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0061, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0062, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 
PCI_VENDOR_ID_COMPUTERBOARDS, 0x0063, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0066, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0067, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x006f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_COMPUTERBOARDS, 0x0079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, pcidas64_pci_table); static inline struct pcidas64_board *board(const struct comedi_device *dev) { return (struct pcidas64_board *)dev->board_ptr; } static inline unsigned short se_diff_bit_6xxx(struct comedi_device *dev, int use_differential) { if ((board(dev)->layout == LAYOUT_64XX && !use_differential) || (board(dev)->layout == LAYOUT_60XX && use_differential)) return ADC_SE_DIFF_BIT; else return 0; }; struct ext_clock_info { unsigned int divisor; /* master clock divisor to use for scans with external master clock */ unsigned int chanspec; /* chanspec for master clock input when used as scan begin src */ }; /* this structure is for data unique to this hardware driver. 
 */
struct pcidas64_private {
	struct pci_dev *hw_dev;	/* pointer to board's pci_dev struct */
	/* base addresses (physical) */
	resource_size_t plx9080_phys_iobase;
	resource_size_t main_phys_iobase;
	resource_size_t dio_counter_phys_iobase;
	/* base addresses (ioremapped) */
	void __iomem *plx9080_iobase;
	void __iomem *main_iobase;
	void __iomem *dio_counter_iobase;
	/* local address (used by dma controller) */
	uint32_t local0_iobase;
	uint32_t local1_iobase;
	/* number of analog input samples remaining */
	volatile unsigned int ai_count;
	/* dma buffers for analog input */
	uint16_t *ai_buffer[MAX_AI_DMA_RING_COUNT];
	/* physical addresses of ai dma buffers */
	dma_addr_t ai_buffer_bus_addr[MAX_AI_DMA_RING_COUNT];
	/*
	 * array of ai dma descriptors read by plx9080,
	 * allocated to get proper alignment
	 */
	struct plx_dma_desc *ai_dma_desc;
	/* physical address of ai dma descriptor array */
	dma_addr_t ai_dma_desc_bus_addr;
	/* index of the ai dma descriptor/buffer that is currently being used */
	volatile unsigned int ai_dma_index;
	/* dma buffers for analog output */
	uint16_t *ao_buffer[AO_DMA_RING_COUNT];
	/* physical addresses of ao dma buffers */
	dma_addr_t ao_buffer_bus_addr[AO_DMA_RING_COUNT];
	struct plx_dma_desc *ao_dma_desc;
	dma_addr_t ao_dma_desc_bus_addr;
	/* keeps track of buffer where the next ao sample should go */
	volatile unsigned int ao_dma_index;
	/* number of analog output samples remaining */
	volatile unsigned long ao_count;
	/* remember what the analog outputs are set to, to allow readback */
	volatile unsigned int ao_value[2];
	unsigned int hw_revision;	/* stc chip hardware revision number */
	/* last bits sent to INTR_ENABLE_REG register */
	volatile unsigned int intr_enable_bits;
	/* last bits sent to ADC_CONTROL1_REG register */
	volatile uint16_t adc_control1_bits;
	/* last bits sent to FIFO_SIZE_REG register */
	volatile uint16_t fifo_size_bits;
	/* last bits sent to HW_CONFIG_REG register */
	volatile uint16_t hw_config_bits;
	volatile uint16_t dac_control1_bits;
	/* last bits written to plx9080 control register */
	volatile uint32_t plx_control_bits;
	/* last bits written to plx interrupt control and status register */
	volatile uint32_t plx_intcsr_bits;
	/* index of calibration source readable through ai ch0 */
	volatile int calibration_source;
	/* bits written to i2c calibration/range register */
	volatile uint8_t i2c_cal_range_bits;
	/* configure digital triggers to trigger on falling edge */
	volatile unsigned int ext_trig_falling;
	/* states of various devices stored to enable read-back */
	unsigned int ad8402_state[2];
	unsigned int caldac_state[8];
	/* presumably set while an ai command is running — confirm in ai_cmd/ai_cancel */
	volatile short ai_cmd_running;
	unsigned int ai_fifo_segment_length;
	struct ext_clock_info ext_clock;
	short ao_bounce_buffer[DAC_FIFO_SIZE];
};

/*
 * inline function that makes it easier to
 * access the private structure.
 */
static inline struct pcidas64_private *priv(struct comedi_device *dev)
{
	return dev->private;
}

/*
 * The comedi_driver structure tells the Comedi core module
 * which functions to call to configure/deconfigure (attach/detach)
 * the board, and also about the kernel module that contains
 * the device code.
*/ static int attach(struct comedi_device *dev, struct comedi_devconfig *it); static int detach(struct comedi_device *dev); static struct comedi_driver driver_cb_pcidas = { .driver_name = "cb_pcidas64", .module = THIS_MODULE, .attach = attach, .detach = detach, }; static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ao_readback_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *subdev, unsigned int trig_num); static int ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static irqreturn_t handle_interrupt(int irq, void *d); static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int dio_callback(int dir, int port, int data, unsigned long arg); static int dio_callback_4020(int dir, int port, int data, unsigned long arg); static int di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dio_60xx_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dio_60xx_wbits(struct 
comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int calib_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int calib_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ad8402_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static void ad8402_write(struct comedi_device *dev, unsigned int channel, unsigned int value); static int ad8402_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static void check_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); static unsigned int get_divisor(unsigned int ns, unsigned int flags); static void i2c_write(struct comedi_device *dev, unsigned int address, const uint8_t * data, unsigned int length); static void caldac_write(struct comedi_device *dev, unsigned int channel, unsigned int value); static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value); /* static int dac_1590_write(struct comedi_device *dev, unsigned int dac_a, unsigned int dac_b); */ static int caldac_i2c_write(struct comedi_device *dev, unsigned int caldac_channel, unsigned int value); static void abort_dma(struct comedi_device *dev, unsigned int channel); static void disable_plx_interrupts(struct comedi_device *dev); static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples); static unsigned int ai_fifo_size(struct comedi_device *dev); static int set_ai_fifo_segment_length(struct comedi_device *dev, unsigned int num_entries); static void disable_ai_pacing(struct comedi_device *dev); static void disable_ai_interrupts(struct comedi_device *dev); static 
void enable_ai_interrupts(struct comedi_device *dev, const struct comedi_cmd *cmd); static unsigned int get_ao_divisor(unsigned int ns, unsigned int flags); static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd); COMEDI_PCI_INITCLEANUP(driver_cb_pcidas, pcidas64_pci_table); static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev, unsigned int range_index) { const struct comedi_krange *range = &board(dev)->ai_range_table->range[range_index]; unsigned int bits = 0; switch (range->max) { case 10000000: bits = 0x000; break; case 5000000: bits = 0x100; break; case 2000000: case 2500000: bits = 0x200; break; case 1000000: case 1250000: bits = 0x300; break; case 500000: bits = 0x400; break; case 200000: case 250000: bits = 0x500; break; case 100000: bits = 0x600; break; case 50000: bits = 0x700; break; default: comedi_error(dev, "bug! in ai_range_bits_6xxx"); break; } if (range->min == 0) bits += 0x900; return bits; } static unsigned int hw_revision(const struct comedi_device *dev, uint16_t hw_status_bits) { if (board(dev)->layout == LAYOUT_4020) return (hw_status_bits >> 13) & 0x7; return (hw_status_bits >> 12) & 0xf; } static void set_dac_range_bits(struct comedi_device *dev, volatile uint16_t * bits, unsigned int channel, unsigned int range) { unsigned int code = board(dev)->ao_range_code[range]; if (channel > 1) comedi_error(dev, "bug! bad channel?"); if (code & ~0x3) comedi_error(dev, "bug! 
bad range code?"); *bits &= ~(0x3 << (2 * channel)); *bits |= code << (2 * channel); }; static inline int ao_cmd_is_supported(const struct pcidas64_board *board) { return board->ao_nchan && board->layout != LAYOUT_4020; } /* initialize plx9080 chip */ static void init_plx9080(struct comedi_device *dev) { uint32_t bits; void __iomem *plx_iobase = priv(dev)->plx9080_iobase; priv(dev)->plx_control_bits = readl(priv(dev)->plx9080_iobase + PLX_CONTROL_REG); /* plx9080 dump */ DEBUG_PRINT(" plx interrupt status 0x%x\n", readl(plx_iobase + PLX_INTRCS_REG)); DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG)); DEBUG_PRINT(" plx control reg 0x%x\n", priv(dev)->plx_control_bits); DEBUG_PRINT(" plx mode/arbitration reg 0x%x\n", readl(plx_iobase + PLX_MARB_REG)); DEBUG_PRINT(" plx region0 reg 0x%x\n", readl(plx_iobase + PLX_REGION0_REG)); DEBUG_PRINT(" plx region1 reg 0x%x\n", readl(plx_iobase + PLX_REGION1_REG)); DEBUG_PRINT(" plx revision 0x%x\n", readl(plx_iobase + PLX_REVISION_REG)); DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n", readl(plx_iobase + PLX_DMA0_MODE_REG)); DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n", readl(plx_iobase + PLX_DMA1_MODE_REG)); DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n", readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG)); DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n", readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG)); DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n", readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG)); DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n", readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG)); DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n", readb(plx_iobase + PLX_DMA0_CS_REG)); DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n", readl(plx_iobase + PLX_DMA0_THRESHOLD_REG)); DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG)); #ifdef __BIG_ENDIAN bits = BIGEND_DMA0 | BIGEND_DMA1; #else bits = 0; #endif writel(bits, priv(dev)->plx9080_iobase + PLX_BIGEND_REG); 
disable_plx_interrupts(dev); abort_dma(dev, 0); abort_dma(dev, 1); /* configure dma0 mode */ bits = 0; /* enable ready input, not sure if this is necessary */ bits |= PLX_DMA_EN_READYIN_BIT; /* enable bterm, not sure if this is necessary */ bits |= PLX_EN_BTERM_BIT; /* enable dma chaining */ bits |= PLX_EN_CHAIN_BIT; /* enable interrupt on dma done (probably don't need this, since chain never finishes) */ bits |= PLX_EN_DMA_DONE_INTR_BIT; /* don't increment local address during transfers (we are transferring from a fixed fifo register) */ bits |= PLX_LOCAL_ADDR_CONST_BIT; /* route dma interrupt to pci bus */ bits |= PLX_DMA_INTR_PCI_BIT; /* enable demand mode */ bits |= PLX_DEMAND_MODE_BIT; /* enable local burst mode */ bits |= PLX_DMA_LOCAL_BURST_EN_BIT; /* 4020 uses 32 bit dma */ if (board(dev)->layout == LAYOUT_4020) { bits |= PLX_LOCAL_BUS_32_WIDE_BITS; } else { /* localspace0 bus is 16 bits wide */ bits |= PLX_LOCAL_BUS_16_WIDE_BITS; } writel(bits, plx_iobase + PLX_DMA1_MODE_REG); if (ao_cmd_is_supported(board(dev))) writel(bits, plx_iobase + PLX_DMA0_MODE_REG); /* enable interrupts on plx 9080 */ priv(dev)->plx_intcsr_bits |= ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE | ICS_DMA0_E | ICS_DMA1_E; writel(priv(dev)->plx_intcsr_bits, priv(dev)->plx9080_iobase + PLX_INTRCS_REG); } /* Allocate and initialize the subdevice structures. 
 */
static int setup_subdevices(struct comedi_device *dev)
{
	struct comedi_subdevice *s;
	void __iomem *dio_8255_iobase;
	int i;

	if (alloc_subdevices(dev, 10) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	/* analog input subdevice */
	dev->read_subdev = s;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DITHER | SDF_CMD_READ;
	if (board(dev)->layout == LAYOUT_60XX)
		s->subdev_flags |= SDF_COMMON | SDF_DIFF;
	else if (board(dev)->layout == LAYOUT_64XX)
		s->subdev_flags |= SDF_DIFF;
	/* XXX Number of inputs in differential mode is ignored */
	s->n_chan = board(dev)->ai_se_chans;
	s->len_chanlist = 0x2000;
	s->maxdata = (1 << board(dev)->ai_bits) - 1;
	s->range_table = board(dev)->ai_range_table;
	s->insn_read = ai_rinsn;
	s->insn_config = ai_config_insn;
	s->do_cmd = ai_cmd;
	s->do_cmdtest = ai_cmdtest;
	s->cancel = ai_cancel;
	if (board(dev)->layout == LAYOUT_4020) {
		uint8_t data;
		/*
		 * set adc to read from inputs
		 * (not internal calibration sources)
		 */
		priv(dev)->i2c_cal_range_bits = adc_src_4020_bits(4);
		/* set channels to +-5 volt input ranges */
		for (i = 0; i < s->n_chan; i++)
			priv(dev)->i2c_cal_range_bits |= attenuate_bit(i);
		data = priv(dev)->i2c_cal_range_bits;
		i2c_write(dev, RANGE_CAL_I2C_ADDR, &data, sizeof(data));
	}

	/* analog output subdevice */
	s = dev->subdevices + 1;
	if (board(dev)->ao_nchan) {
		s->type = COMEDI_SUBD_AO;
		s->subdev_flags =
			SDF_READABLE | SDF_WRITABLE | SDF_GROUND |
			SDF_CMD_WRITE;
		s->n_chan = board(dev)->ao_nchan;
		s->maxdata = (1 << board(dev)->ao_bits) - 1;
		s->range_table = board(dev)->ao_range_table;
		s->insn_read = ao_readback_insn;
		s->insn_write = ao_winsn;
		if (ao_cmd_is_supported(board(dev))) {
			dev->write_subdev = s;
			s->do_cmdtest = ao_cmdtest;
			s->do_cmd = ao_cmd;
			s->len_chanlist = board(dev)->ao_nchan;
			s->cancel = ao_cancel;
		}
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	/* digital input */
	s = dev->subdevices + 2;
	if (board(dev)->layout == LAYOUT_64XX) {
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE;
		s->n_chan = 4;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = di_rbits;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/*
	 * digital output
	 * NOTE(review): on non-64XX boards the else branch re-marks
	 * subdevice 2 as unused while subdevice 3 is left zeroed —
	 * works only because COMEDI_SUBD_UNUSED is 0; confirm.
	 */
	if (board(dev)->layout == LAYOUT_64XX) {
		s = dev->subdevices + 3;
		s->type = COMEDI_SUBD_DO;
		s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
		s->n_chan = 4;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = do_wbits;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* 8255 */
	s = dev->subdevices + 4;
	if (board(dev)->has_8255) {
		if (board(dev)->layout == LAYOUT_4020) {
			dio_8255_iobase =
				priv(dev)->main_iobase + I8255_4020_REG;
			subdev_8255_init(dev, s, dio_callback_4020,
					 (unsigned long)dio_8255_iobase);
		} else {
			dio_8255_iobase =
				priv(dev)->dio_counter_iobase + DIO_8255_OFFSET;
			subdev_8255_init(dev, s, dio_callback,
					 (unsigned long)dio_8255_iobase);
		}
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* 8 channel dio for 60xx */
	s = dev->subdevices + 5;
	if (board(dev)->layout == LAYOUT_60XX) {
		s->type = COMEDI_SUBD_DIO;
		s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
		s->n_chan = 8;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_config = dio_60xx_config_insn;
		s->insn_bits = dio_60xx_wbits;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* caldac */
	s = dev->subdevices + 6;
	s->type = COMEDI_SUBD_CALIB;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
	s->n_chan = 8;
	if (board(dev)->layout == LAYOUT_4020)
		s->maxdata = 0xfff;
	else
		s->maxdata = 0xff;
	s->insn_read = calib_read_insn;
	s->insn_write = calib_write_insn;
	/* start the caldacs at mid-scale */
	for (i = 0; i < s->n_chan; i++)
		caldac_write(dev, i, s->maxdata / 2);

	/* 2 channel ad8402 potentiometer */
	s = dev->subdevices + 7;
	if (board(dev)->layout == LAYOUT_64XX) {
		s->type = COMEDI_SUBD_CALIB;
		s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
		s->n_chan = 2;
		s->insn_read = ad8402_read_insn;
		s->insn_write = ad8402_write_insn;
		s->maxdata = 0xff;
		for (i = 0; i < s->n_chan; i++)
			ad8402_write(dev, i, s->maxdata / 2);
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* serial EEPROM, if present */
	s = dev->subdevices + 8;
	if
				cpu_to_le32(priv(dev)->ao_buffer_bus_addr[i]);
			priv(dev)->ao_dma_desc[i].local_start_addr =
				cpu_to_le32(priv(dev)->local0_iobase +
					    DAC_FIFO_REG);
			priv(dev)->ao_dma_desc[i].transfer_size =
				cpu_to_le32(0);
			/* chain descriptors into a ring */
			priv(dev)->ao_dma_desc[i].next =
				cpu_to_le32((priv(dev)->ao_dma_desc_bus_addr +
					     ((i + 1) % (AO_DMA_RING_COUNT)) *
					     sizeof(priv(dev)->ao_dma_desc[0]))
					    | PLX_DESC_IN_PCI_BIT |
					    PLX_INTR_TERM_COUNT);
		}
	}
	return 0;
}

/* Warn that the ai external channel queue conflicts with ao commands. */
static inline void warn_external_queue(struct comedi_device *dev)
{
	comedi_error(dev,
		     "AO command and AI external channel queue cannot be used simultaneously.");
	comedi_error(dev,
		     "Use internal AI channel queue (channels must be consecutive and use same range/aref)");
}

/*
 * Attach is called by the Comedi core to configure the driver
 * for a particular board.
 */
static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct pci_dev *pcidev;
	int index;
	uint32_t local_range, local_decode;
	int retval;

	printk("comedi%d: cb_pcidas64\n", dev->minor);

	/*
	 * Allocate the private structure area.
	 */
	if (alloc_private(dev, sizeof(struct pcidas64_private)) < 0)
		return -ENOMEM;

	/*
	 * Probe the device to determine what device in the series it is.
	 */
	for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	     pcidev != NULL;
	     pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
		/* is it not a computer boards card? */
		if (pcidev->vendor != PCI_VENDOR_ID_COMPUTERBOARDS)
			continue;
		/* loop through cards supported by this driver */
		for (index = 0; index < ARRAY_SIZE(pcidas64_boards); index++) {
			if (pcidas64_boards[index].device_id != pcidev->device)
				continue;
			/* was a particular bus/slot requested? */
			if (it->options[0] || it->options[1]) {
				/* are we on the wrong bus/slot? */
				if (pcidev->bus->number != it->options[0] ||
				    PCI_SLOT(pcidev->devfn) != it->options[1]) {
					continue;
				}
			}
			priv(dev)->hw_dev = pcidev;
			dev->board_ptr = pcidas64_boards + index;
			break;
		}
		if (dev->board_ptr)
			break;
	}

	if (dev->board_ptr == NULL) {
		printk
		    ("No supported ComputerBoards/MeasurementComputing card found\n");
		return -EIO;
	}

	printk("Found %s on bus %i, slot %i\n", board(dev)->name,
	       pcidev->bus->number, PCI_SLOT(pcidev->devfn));

	if (comedi_pci_enable(pcidev, driver_cb_pcidas.driver_name)) {
		printk(KERN_WARNING
		       " failed to enable PCI device and request regions\n");
		return -EIO;
	}
	pci_set_master(pcidev);

	/* Initialize dev->board_name */
	dev->board_name = board(dev)->name;

	priv(dev)->plx9080_phys_iobase =
		pci_resource_start(pcidev, PLX9080_BADDRINDEX);
	priv(dev)->main_phys_iobase =
		pci_resource_start(pcidev, MAIN_BADDRINDEX);
	priv(dev)->dio_counter_phys_iobase =
		pci_resource_start(pcidev, DIO_COUNTER_BADDRINDEX);

	/* remap, won't work with 2.0 kernels but who cares */
	priv(dev)->plx9080_iobase =
		ioremap(priv(dev)->plx9080_phys_iobase,
			pci_resource_len(pcidev, PLX9080_BADDRINDEX));
	priv(dev)->main_iobase =
		ioremap(priv(dev)->main_phys_iobase,
			pci_resource_len(pcidev, MAIN_BADDRINDEX));
	priv(dev)->dio_counter_iobase =
		ioremap(priv(dev)->dio_counter_phys_iobase,
			pci_resource_len(pcidev, DIO_COUNTER_BADDRINDEX));

	/* error paths below rely on detach() being called on failure */
	if (!priv(dev)->plx9080_iobase || !priv(dev)->main_iobase
	    || !priv(dev)->dio_counter_iobase) {
		printk(" failed to remap io memory\n");
		return -ENOMEM;
	}

	DEBUG_PRINT(" plx9080 remapped to 0x%p\n", priv(dev)->plx9080_iobase);
	DEBUG_PRINT(" main remapped to 0x%p\n", priv(dev)->main_iobase);
	DEBUG_PRINT(" diocounter remapped to 0x%p\n",
		    priv(dev)->dio_counter_iobase);

	/* figure out what local addresses are */
	local_range =
		readl(priv(dev)->plx9080_iobase + PLX_LAS0RNG_REG) &
		LRNG_MEM_MASK;
	local_decode =
		readl(priv(dev)->plx9080_iobase + PLX_LAS0MAP_REG) &
		local_range & LMAP_MEM_MASK;
	priv(dev)->local0_iobase =
		((uint32_t) priv(dev)->main_phys_iobase & ~local_range) |
		local_decode;
	local_range =
		readl(priv(dev)->plx9080_iobase + PLX_LAS1RNG_REG) &
		LRNG_MEM_MASK;
	local_decode =
		readl(priv(dev)->plx9080_iobase + PLX_LAS1MAP_REG) &
		local_range & LMAP_MEM_MASK;
	priv(dev)->local1_iobase =
		((uint32_t) priv(dev)->dio_counter_phys_iobase & ~local_range)
		| local_decode;

	DEBUG_PRINT(" local 0 io addr 0x%x\n", priv(dev)->local0_iobase);
	DEBUG_PRINT(" local 1 io addr 0x%x\n", priv(dev)->local1_iobase);

	retval = alloc_and_init_dma_members(dev);
	if (retval < 0)
		return retval;

	priv(dev)->hw_revision =
		hw_revision(dev, readw(priv(dev)->main_iobase + HW_STATUS_REG));
	printk(" stc hardware revision %i\n", priv(dev)->hw_revision);
	init_plx9080(dev);
	init_stc_registers(dev);
	/* get irq */
	if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
			"cb_pcidas64", dev)) {
		printk(" unable to allocate irq %u\n", pcidev->irq);
		return -EINVAL;
	}
	dev->irq = pcidev->irq;
	printk(" irq %u\n", dev->irq);

	retval = setup_subdevices(dev);
	if (retval < 0)
		return retval;

	return 0;
}

/*
 * _detach is called to deconfigure a device.  It should deallocate
 * resources.
 * This function is also called when _attach() fails, so it should be
 * careful not to release resources that were not necessarily
 * allocated by _attach().  dev->private and dev->subdevices are
 * deallocated automatically by the core.
 */
/* Free everything attach() may have acquired, in reverse order; every
 * resource is NULL/zero-checked because this also runs on attach() failure. */
static int detach(struct comedi_device *dev)
{
	unsigned int i;

	printk("comedi%d: cb_pcidas: remove\n", dev->minor);
	if (dev->irq)
		free_irq(dev->irq, dev);
	if (priv(dev)) {
		if (priv(dev)->hw_dev) {
			if (priv(dev)->plx9080_iobase) {
				/* quiesce the bridge before unmapping it */
				disable_plx_interrupts(dev);
				iounmap(priv(dev)->plx9080_iobase);
			}
			if (priv(dev)->main_iobase)
				iounmap(priv(dev)->main_iobase);
			if (priv(dev)->dio_counter_iobase)
				iounmap(priv(dev)->dio_counter_iobase);
			/* free pci dma buffers */
			for (i = 0; i < ai_dma_ring_count(board(dev)); i++) {
				if (priv(dev)->ai_buffer[i])
					pci_free_consistent(priv(dev)->hw_dev,
						DMA_BUFFER_SIZE,
						priv(dev)->ai_buffer[i],
						priv(dev)->
						ai_buffer_bus_addr[i]);
			}
			for (i = 0; i < AO_DMA_RING_COUNT; i++) {
				if (priv(dev)->ao_buffer[i])
					pci_free_consistent(priv(dev)->hw_dev,
						DMA_BUFFER_SIZE,
						priv(dev)->ao_buffer[i],
						priv(dev)->
						ao_buffer_bus_addr[i]);
			}
			/* free dma descriptors */
			if (priv(dev)->ai_dma_desc)
				pci_free_consistent(priv(dev)->hw_dev,
					sizeof(struct plx_dma_desc) *
					ai_dma_ring_count(board(dev)),
					priv(dev)->ai_dma_desc,
					priv(dev)->ai_dma_desc_bus_addr);
			if (priv(dev)->ao_dma_desc)
				pci_free_consistent(priv(dev)->hw_dev,
					sizeof(struct plx_dma_desc) *
					AO_DMA_RING_COUNT,
					priv(dev)->ao_dma_desc,
					priv(dev)->ao_dma_desc_bus_addr);
			/* main_phys_iobase non-zero implies comedi_pci_enable
			 * succeeded, so it is the disable sentinel */
			if (priv(dev)->main_phys_iobase)
				comedi_pci_disable(priv(dev)->hw_dev);
			pci_dev_put(priv(dev)->hw_dev);
		}
	}
	/* NOTE(review): assumes subdevice index 4 is the 8255 dio --
	 * confirm against setup_subdevices() */
	if (dev->subdevices)
		subdev_8255_cleanup(dev, dev->subdevices + 4);
	return 0;
}

/* Analog input read instruction: configures the channel/range/reference,
 * then polls one software-triggered conversion per requested sample.
 * Returns the number of samples read, or -ETIME on conversion timeout. */
static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int bits = 0, n, i;
	unsigned int channel, range, aref;
	unsigned long flags;
	static const int timeout = 100;	/* polls of ~1us each */

	DEBUG_PRINT("chanspec 0x%x\n", insn->chanspec);
	channel = CR_CHAN(insn->chanspec);
	range = CR_RANGE(insn->chanspec);
	aref = CR_AREF(insn->chanspec);
	/* disable card's analog input interrupt sources and pacing */
	/* 4020 generates dac done interrupts even though they are disabled */
	disable_ai_pacing(dev);
	/* dither bit is toggled under the spinlock shared with the irq path */
	spin_lock_irqsave(&dev->spinlock, flags);
	if (insn->chanspec & CR_ALT_FILTER)
		priv(dev)->adc_control1_bits |= ADC_DITHER_BIT;
	else
		priv(dev)->adc_control1_bits &= ~ADC_DITHER_BIT;
	writew(priv(dev)->adc_control1_bits,
		priv(dev)->main_iobase + ADC_CONTROL1_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);
	if (board(dev)->layout != LAYOUT_4020) {
		/* use internal queue */
		priv(dev)->hw_config_bits &= ~EXT_QUEUE_BIT;
		writew(priv(dev)->hw_config_bits,
			priv(dev)->main_iobase + HW_CONFIG_REG);
		/* ALT_SOURCE is internal calibration reference */
		if (insn->chanspec & CR_ALT_SOURCE) {
			unsigned int cal_en_bit;

			DEBUG_PRINT("reading calibration source\n");
			if (board(dev)->layout == LAYOUT_60XX)
				cal_en_bit = CAL_EN_60XX_BIT;
			else
				cal_en_bit = CAL_EN_64XX_BIT;
			/* select internal reference source to connect to channel 0 */
			writew(cal_en_bit |
				adc_src_bits(priv(dev)->calibration_source),
				priv(dev)->main_iobase + CALIBRATION_REG);
		} else {
			/* make sure internal calibration source is turned off */
			writew(0, priv(dev)->main_iobase + CALIBRATION_REG);
		}
		/* load internal queue */
		bits = 0;
		/* set gain */
		bits |= ai_range_bits_6xxx(dev, CR_RANGE(insn->chanspec));
		/* set single-ended / differential */
		bits |= se_diff_bit_6xxx(dev, aref == AREF_DIFF);
		if (aref == AREF_COMMON)
			bits |= ADC_COMMON_BIT;
		bits |= adc_chan_bits(channel);
		/* set stop channel */
		writew(adc_chan_bits(channel),
			priv(dev)->main_iobase + ADC_QUEUE_HIGH_REG);
		/* set start channel, and rest of settings */
		writew(bits, priv(dev)->main_iobase + ADC_QUEUE_LOAD_REG);
	} else {
		/* 4020: input source and attenuation live in a slow i2c
		 * register, so only rewrite it when the bits change */
		uint8_t old_cal_range_bits = priv(dev)->i2c_cal_range_bits;

		priv(dev)->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
		if (insn->chanspec & CR_ALT_SOURCE) {
			DEBUG_PRINT("reading calibration source\n");
			priv(dev)->i2c_cal_range_bits |=
				adc_src_4020_bits(priv(dev)->
					calibration_source);
		} else {	/* select BNC inputs */
			priv(dev)->i2c_cal_range_bits |= adc_src_4020_bits(4);
		}
		/* select range */
		if (range == 0)
			priv(dev)->i2c_cal_range_bits |=
				attenuate_bit(channel);
		else
			priv(dev)->i2c_cal_range_bits &=
				~attenuate_bit(channel);
		/* update calibration/range i2c register only if necessary, as it is very slow */
		if (old_cal_range_bits != priv(dev)->i2c_cal_range_bits) {
			uint8_t i2c_data = priv(dev)->i2c_cal_range_bits;

			i2c_write(dev, RANGE_CAL_I2C_ADDR, &i2c_data,
				sizeof(i2c_data));
		}
		/* 4020 manual asks that sample interval register to be set before writing to convert register.
		 * Using somewhat arbitrary setting of 4 master clock ticks = 0.1 usec */
		writew(0,
			priv(dev)->main_iobase +
			ADC_SAMPLE_INTERVAL_UPPER_REG);
		writew(2,
			priv(dev)->main_iobase +
			ADC_SAMPLE_INTERVAL_LOWER_REG);
	}
	for (n = 0; n < insn->n; n++) {
		/* clear adc buffer (inside loop for 4020 sake) */
		writew(0, priv(dev)->main_iobase + ADC_BUFFER_CLEAR_REG);
		/* trigger conversion, bits sent only matter for 4020 */
		writew(adc_convert_chan_4020_bits(CR_CHAN(insn->chanspec)),
			priv(dev)->main_iobase + ADC_CONVERT_REG);
		/* wait for data */
		for (i = 0; i < timeout; i++) {
			bits = readw(priv(dev)->main_iobase + HW_STATUS_REG);
			DEBUG_PRINT(" pipe bits 0x%x\n", pipe_full_bits(bits));
			if (board(dev)->layout == LAYOUT_4020) {
				/* 4020 signals data by a non-zero write ptr */
				if (readw(priv(dev)->main_iobase +
					ADC_WRITE_PNTR_REG))
					break;
			} else {
				if (pipe_full_bits(bits))
					break;
			}
			udelay(1);
		}
		DEBUG_PRINT(" looped %i times waiting for data\n", i);
		if (i == timeout) {
			comedi_error(dev, " analog input read insn timed out");
			printk(" status 0x%x\n", bits);
			return -ETIME;
		}
		if (board(dev)->layout == LAYOUT_4020)
			data[n] = readl(priv(dev)->dio_counter_iobase +
				ADC_FIFO_REG) & 0xffff;
		else
			data[n] = readw(priv(dev)->main_iobase +
				PIPE1_READ_REG);
	}
	return n;
}

/* INSN_CONFIG_ALT_SOURCE handler: validate and store the calibration
 * source index (range depends on board family).  Returns 2 (insn length)
 * on success. */
static int ai_config_calibration_source(struct comedi_device *dev,
	unsigned int *data)
{
	unsigned int source = data[1];
	int num_calibration_sources;

	if (board(dev)->layout == LAYOUT_60XX)
		num_calibration_sources = 16;
	else
		num_calibration_sources = 8;
	if (source >= num_calibration_sources) {
		printk("invalid calibration source: %i\n", source);
		return
			-EINVAL;
	}
	DEBUG_PRINT("setting calibration source to %i\n", source);
	priv(dev)->calibration_source = source;
	return 2;
}

/* INSN_CONFIG_BLOCK_SIZE handler: optionally resize the ai fifo, then
 * report the resulting block size (bytes) back through data[1]. */
static int ai_config_block_size(struct comedi_device *dev, unsigned int *data)
{
	int fifo_size;
	const struct hw_fifo_info *const fifo = board(dev)->ai_fifo;
	unsigned int block_size, requested_block_size;
	int retval;

	requested_block_size = data[1];
	if (requested_block_size) {
		fifo_size = requested_block_size * fifo->num_segments /
			bytes_in_sample;
		retval = set_ai_fifo_size(dev, fifo_size);
		if (retval < 0)
			return retval;
	}
	block_size = ai_fifo_size(dev) / fifo->num_segments * bytes_in_sample;
	data[1] = block_size;
	return 2;
}

/* INSN_CONFIG_TIMER_1 handler for the 4020: record divisor/chanspec of the
 * external master clock.  Clamps divisor to >= 2 and reports the clamped
 * value back (returning -EAGAIN so the caller can see the adjustment). */
static int ai_config_master_clock_4020(struct comedi_device *dev,
	unsigned int *data)
{
	unsigned int divisor = data[4];
	int retval = 0;

	if (divisor < 2) {
		divisor = 2;
		retval = -EAGAIN;
	}
	switch (data[1]) {
	case COMEDI_EV_SCAN_BEGIN:
		priv(dev)->ext_clock.divisor = divisor;
		priv(dev)->ext_clock.chanspec = data[2];
		break;
	default:
		return -EINVAL;
		break;
	}
	data[4] = divisor;
	/* 5 is the insn length on success */
	return retval ? retval : 5;
}

/* XXX could add support for 60xx series */
static int ai_config_master_clock(struct comedi_device *dev,
	unsigned int *data)
{
	switch (board(dev)->layout) {
	case LAYOUT_4020:
		return ai_config_master_clock_4020(dev, data);
		break;
	default:
		return -EINVAL;
		break;
	}
	return -EINVAL;
}

/* Dispatch INSN_CONFIG sub-instructions by id (data[0]). */
static int ai_config_insn(struct comedi_device *dev,
	struct comedi_subdevice *s, struct comedi_insn *insn,
	unsigned int *data)
{
	int id = data[0];

	switch (id) {
	case INSN_CONFIG_ALT_SOURCE:
		return ai_config_calibration_source(dev, data);
		break;
	case INSN_CONFIG_BLOCK_SIZE:
		return ai_config_block_size(dev, data);
		break;
	case INSN_CONFIG_TIMER_1:
		return ai_config_master_clock(dev, data);
		break;
	default:
		return -EINVAL;
		break;
	}
	return -EINVAL;
}

/* Standard comedi cmdtest: validate/normalize an async ai command in the
 * usual five steps, returning the failing step number or 0 if valid. */
static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	unsigned int tmp_arg, tmp_arg2;
	int i;
	int aref;
	unsigned int triggers;

	/* step 1: make sure trigger sources are trivially valid */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_EXT;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;
	tmp = cmd->scan_begin_src;
	triggers = TRIG_TIMER;
	if (board(dev)->layout == LAYOUT_4020)
		triggers |= TRIG_OTHER;
	else
		triggers |= TRIG_FOLLOW;
	cmd->scan_begin_src &= triggers;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;
	tmp = cmd->convert_src;
	triggers = TRIG_TIMER;
	if (board(dev)->layout == LAYOUT_4020)
		triggers |= TRIG_NOW;
	else
		triggers |= TRIG_EXT;
	cmd->convert_src &= triggers;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;
	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;
	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_EXT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;
	if (err)
		return 1;

	/* step 2: make sure trigger sources are unique and mutually compatible */
	/* uniqueness check */
	if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT)
		err++;
	if (cmd->scan_begin_src != TRIG_TIMER &&
		cmd->scan_begin_src != TRIG_OTHER &&
		cmd->scan_begin_src != TRIG_FOLLOW)
		err++;
	if (cmd->convert_src != TRIG_TIMER &&
		cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW)
		err++;
	if (cmd->stop_src != TRIG_COUNT &&
		cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT)
		err++;
	/* compatibility check */
	if (cmd->convert_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER)
		err++;
	/* NOTE(review): this repeats the stop_src uniqueness check just
	 * above verbatim; redundant but harmless */
	if (cmd->stop_src != TRIG_COUNT &&
		cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT)
		err++;
	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */
	if (cmd->convert_src == TRIG_TIMER) {
		if (board(dev)->layout == LAYOUT_4020) {
			/* 4020 has no per-conversion timer */
			if (cmd->convert_arg) {
				cmd->convert_arg = 0;
				err++;
			}
		} else {
			if (cmd->convert_arg < board(dev)->ai_speed) {
				cmd->convert_arg = board(dev)->ai_speed;
				err++;
			}
			if (cmd->scan_begin_src == TRIG_TIMER) {
				/* if scans are timed faster than conversion rate allows */
				if (cmd->convert_arg * cmd->chanlist_len >
					cmd->scan_begin_arg) {
					cmd->scan_begin_arg =
						cmd->convert_arg *
						cmd->chanlist_len;
					err++;
				}
			}
		}
	}
	if (!cmd->chanlist_len) {
		cmd->chanlist_len = 1;
		err++;
	}
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	switch (cmd->stop_src) {
	case TRIG_EXT:
		break;
	case TRIG_COUNT:
		if (!cmd->stop_arg) {
			cmd->stop_arg = 1;
			err++;
		}
		break;
	case TRIG_NONE:
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
		break;
	default:
		break;
	}
	if (err)
		return 3;

	/* step 4: fix up any arguments */
	if (cmd->convert_src == TRIG_TIMER) {
		tmp_arg = cmd->convert_arg;
		tmp_arg2 = cmd->scan_begin_arg;
		/* rounds timing args to achievable values in place */
		check_adc_timing(dev, cmd);
		if (tmp_arg != cmd->convert_arg)
			err++;
		if (tmp_arg2 != cmd->scan_begin_arg)
			err++;
	}
	if (err)
		return 4;

	/* make sure user is doesn't change analog reference mid chanlist */
	if (cmd->chanlist) {
		aref = CR_AREF(cmd->chanlist[0]);
		for (i = 1; i < cmd->chanlist_len; i++) {
			if (aref != CR_AREF(cmd->chanlist[i])) {
				comedi_error(dev, "all elements in chanlist must use the same analog reference");
				err++;
				break;
			}
		}
		/* check 4020 chanlist */
		if (board(dev)->layout == LAYOUT_4020) {
			unsigned int first_channel = CR_CHAN(cmd->chanlist[0]);

			for (i = 1; i < cmd->chanlist_len; i++) {
				if (CR_CHAN(cmd->chanlist[i]) !=
					first_channel + i) {
					comedi_error(dev,
						"chanlist must use consecutive channels");
					err++;
					break;
				}
			}
			if (cmd->chanlist_len == 3) {
				comedi_error(dev,
					"chanlist cannot be 3 channels long, use 1, 2, or 4 channels");
				err++;
			}
		}
	}
	if (err)
		return 5;
	return 0;
}

/* Whether the hardware sample counter can terminate the acquisition.
 * The early "return 0" deliberately disables it (race noted below);
 * the code after it is intentionally unreachable until that is fixed. */
static int use_hw_sample_counter(struct comedi_cmd *cmd)
{
	/* disable for now until I work out a race */
	return 0;

	if (cmd->stop_src == TRIG_COUNT && cmd->stop_arg <= max_counter_value)
		return 1;
	else
		return 0;
}

/* Program software and (optionally) hardware sample counters for a cmd. */
static void setup_sample_counters(struct comedi_device *dev,
	struct comedi_cmd *cmd)
{
	if (cmd->stop_src == TRIG_COUNT) {
		/* set software count */
		priv(dev)->ai_count = cmd->stop_arg * cmd->chanlist_len;
	}
	/* load hardware conversion counter */
	if (use_hw_sample_counter(cmd)) {
		writew(cmd->stop_arg & 0xffff,
			priv(dev)->main_iobase + ADC_COUNT_LOWER_REG);
		writew((cmd->stop_arg >> 16) & 0xff,
			priv(dev)->main_iobase + ADC_COUNT_UPPER_REG);
	} else {
		writew(1, priv(dev)->main_iobase + ADC_COUNT_LOWER_REG);
	}
}

/* Samples per dma buffer, capped by DMA_BUFFER_SIZE (16-bit samples). */
static inline unsigned int dma_transfer_size(struct comedi_device *dev)
{
	unsigned int num_samples;

	num_samples = priv(dev)->ai_fifo_segment_length *
		board(dev)->ai_fifo->sample_packing_ratio;
	if (num_samples > DMA_BUFFER_SIZE / sizeof(uint16_t))
		num_samples = DMA_BUFFER_SIZE / sizeof(uint16_t);
	return num_samples;
}

/* Stop ai conversions: interrupts off, software gate closed, pacing off. */
static void disable_ai_pacing(struct comedi_device *dev)
{
	unsigned long flags;

	disable_ai_interrupts(dev);
	spin_lock_irqsave(&dev->spinlock, flags);
	priv(dev)->adc_control1_bits &= ~ADC_SW_GATE_BIT;
	writew(priv(dev)->adc_control1_bits,
		priv(dev)->main_iobase + ADC_CONTROL1_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);
	/* disable pacing, triggering, etc */
	writew(ADC_DMA_DISABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT,
		priv(dev)->main_iobase + ADC_CONTROL0_REG);
}

/* Mask all ai interrupt sources in the cached enable bits and push the
 * result to the board (under the spinlock shared with the irq handler). */
static void disable_ai_interrupts(struct comedi_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);
	priv(dev)->intr_enable_bits &=
		~EN_ADC_INTR_SRC_BIT & ~EN_ADC_DONE_INTR_BIT &
		~EN_ADC_ACTIVE_INTR_BIT & ~EN_ADC_STOP_INTR_BIT &
		~EN_ADC_OVERRUN_BIT & ~ADC_INTR_SRC_MASK;
	writew(priv(dev)->intr_enable_bits,
		priv(dev)->main_iobase + INTR_ENABLE_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);
	DEBUG_PRINT("intr enable bits 0x%x\n", priv(dev)->intr_enable_bits);
}

/* Enable the ai interrupts needed for the given command. */
static void enable_ai_interrupts(struct comedi_device *dev,
	const struct comedi_cmd *cmd)
{
	uint32_t bits;
	unsigned long flags;

	bits = EN_ADC_OVERRUN_BIT | EN_ADC_DONE_INTR_BIT |
		EN_ADC_ACTIVE_INTR_BIT | EN_ADC_STOP_INTR_BIT;
	/* Use pio transfer and interrupt on end of conversion if TRIG_WAKE_EOS flag is set. */
	if (cmd->flags & TRIG_WAKE_EOS) {
		/* 4020 doesn't support pio transfers except for fifo dregs */
		if (board(dev)->layout != LAYOUT_4020)
			bits |= ADC_INTR_EOSCAN_BITS | EN_ADC_INTR_SRC_BIT;
	}
	spin_lock_irqsave(&dev->spinlock, flags);
	priv(dev)->intr_enable_bits |= bits;
	writew(priv(dev)->intr_enable_bits,
		priv(dev)->main_iobase + INTR_ENABLE_REG);
	DEBUG_PRINT("intr enable bits 0x%x\n", priv(dev)->intr_enable_bits);
	spin_unlock_irqrestore(&dev->spinlock, flags);
}

/* Convert-interval counter value for 6xxx boards. */
static uint32_t ai_convert_counter_6xxx(const struct comedi_device *dev,
	const struct comedi_cmd *cmd)
{
	/* supposed to load counter with desired divisor minus 3 */
	return cmd->convert_arg / TIMER_BASE - 3;
}

/* End-of-scan delay counter value for 6xxx boards. */
static uint32_t ai_scan_counter_6xxx(struct comedi_device *dev,
	struct comedi_cmd *cmd)
{
	uint32_t count;

	/* figure out how long we need to delay at end of scan */
	switch (cmd->scan_begin_src) {
	case TRIG_TIMER:
		count = (cmd->scan_begin_arg -
			(cmd->convert_arg * (cmd->chanlist_len - 1))) /
			TIMER_BASE;
		break;
	case TRIG_FOLLOW:
		count = cmd->convert_arg / TIMER_BASE;
		break;
	default:
		return 0;
		break;
	}
	return count - 3;
}

/* Scan-interval divisor for the 4020 (internal timer or stored external
 * clock divisor). */
static uint32_t ai_convert_counter_4020(struct comedi_device *dev,
	struct comedi_cmd *cmd)
{
	unsigned int divisor;

	switch (cmd->scan_begin_src) {
	case TRIG_TIMER:
		divisor = cmd->scan_begin_arg / TIMER_BASE;
		break;
	case TRIG_OTHER:
		divisor = priv(dev)->ext_clock.divisor;
		break;
	default:	/* should never happen */
		comedi_error(dev, "bug! failed to set ai pacing!");
		divisor = 1000;
		break;
	}
	/* supposed to load counter with desired divisor minus 2 for 4020 */
	return divisor - 2;
}

/* Route the 4020 master clock (internal, BNC, or external pin) per cmd. */
static void select_master_clock_4020(struct comedi_device *dev,
	const struct comedi_cmd *cmd)
{
	/* select internal/external master clock */
	priv(dev)->hw_config_bits &= ~MASTER_CLOCK_4020_MASK;
	if (cmd->scan_begin_src == TRIG_OTHER) {
		int chanspec = priv(dev)->ext_clock.chanspec;

		if (CR_CHAN(chanspec))
			priv(dev)->hw_config_bits |= BNC_CLOCK_4020_BITS;
		else
			priv(dev)->hw_config_bits |= EXT_CLOCK_4020_BITS;
	} else {
		priv(dev)->hw_config_bits |= INTERNAL_CLOCK_4020_BITS;
	}
	writew(priv(dev)->hw_config_bits,
		priv(dev)->main_iobase + HW_CONFIG_REG);
}

/* Board-family dispatch for master clock selection (only 4020 needs it). */
static void select_master_clock(struct comedi_device *dev,
	const struct comedi_cmd *cmd)
{
	switch (board(dev)->layout) {
	case LAYOUT_4020:
		select_master_clock_4020(dev, cmd);
		break;
	default:
		break;
	}
}

/* Kick off the given plx dma channel (0 or 1). */
static inline void dma_start_sync(struct comedi_device *dev,
	unsigned int channel)
{
	unsigned long flags;

	/* spinlock for plx dma control/status reg */
	spin_lock_irqsave(&dev->spinlock, flags);
	if (channel)
		writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
			PLX_CLEAR_DMA_INTR_BIT,
			priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG);
	else
		writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
			PLX_CLEAR_DMA_INTR_BIT,
			priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);
}

/* Program convert-interval and scan-delay counters for the command. */
static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
{
	uint32_t convert_counter = 0, scan_counter = 0;

	check_adc_timing(dev, cmd);
	select_master_clock(dev, cmd);
	if (board(dev)->layout == LAYOUT_4020) {
		convert_counter = ai_convert_counter_4020(dev, cmd);
	} else {
		convert_counter = ai_convert_counter_6xxx(dev, cmd);
		scan_counter = ai_scan_counter_6xxx(dev, cmd);
	}
	/* load lower 16 bits of convert interval */
	writew(convert_counter & 0xffff,
		priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
	DEBUG_PRINT("convert counter 0x%x\n", convert_counter);
	/* load upper 8 bits of convert interval */
	writew((convert_counter >> 16) & 0xff,
		priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
	/* load lower 16 bits of scan delay */
	writew(scan_counter & 0xffff,
		priv(dev)->main_iobase + ADC_DELAY_INTERVAL_LOWER_REG);
	/* load upper 8 bits of scan delay */
	writew((scan_counter >> 16) & 0xff,
		priv(dev)->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG);
	DEBUG_PRINT("scan counter 0x%x\n", scan_counter);
}

/* A chanlist qualifies for the internal queue only when channels are
 * consecutive with identical range and reference throughout. */
static int use_internal_queue_6xxx(const struct comedi_cmd *cmd)
{
	int i;

	for (i = 0; i + 1 < cmd->chanlist_len; i++) {
		if (CR_CHAN(cmd->chanlist[i + 1]) !=
			CR_CHAN(cmd->chanlist[i]) + 1)
			return 0;
		if (CR_RANGE(cmd->chanlist[i + 1]) !=
			CR_RANGE(cmd->chanlist[i]))
			return 0;
		if (CR_AREF(cmd->chanlist[i + 1]) != CR_AREF(cmd->chanlist[i]))
			return 0;
	}
	return 1;
}

/* Load the channel/gain queue for the command: internal queue when the
 * chanlist allows, external queue otherwise (6xxx), or the i2c range
 * register on the 4020.  Returns 0 or a negative errno. */
static int setup_channel_queue(struct comedi_device *dev,
	const struct comedi_cmd *cmd)
{
	unsigned short bits;
	int i;

	if (board(dev)->layout != LAYOUT_4020) {
		if (use_internal_queue_6xxx(cmd)) {
			priv(dev)->hw_config_bits &= ~EXT_QUEUE_BIT;
			writew(priv(dev)->hw_config_bits,
				priv(dev)->main_iobase + HW_CONFIG_REG);
			bits = 0;
			/* set channel */
			bits |= adc_chan_bits(CR_CHAN(cmd->chanlist[0]));
			/* set gain */
			bits |= ai_range_bits_6xxx(dev,
				CR_RANGE(cmd->chanlist[0]));
			/* set single-ended / differential */
			bits |= se_diff_bit_6xxx(dev,
				CR_AREF(cmd->chanlist[0]) == AREF_DIFF);
			if (CR_AREF(cmd->chanlist[0]) == AREF_COMMON)
				bits |= ADC_COMMON_BIT;
			/* set stop channel */
			writew(adc_chan_bits
				(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])),
				priv(dev)->main_iobase + ADC_QUEUE_HIGH_REG);
			/* set start channel, and rest of settings */
			writew(bits,
				priv(dev)->main_iobase + ADC_QUEUE_LOAD_REG);
		} else {	/* use
external queue */
			/* external queue shares hardware with ao waveform
			 * generation, so refuse if ao is busy */
			if (dev->write_subdev && dev->write_subdev->busy) {
				warn_external_queue(dev);
				return -EBUSY;
			}
			priv(dev)->hw_config_bits |= EXT_QUEUE_BIT;
			writew(priv(dev)->hw_config_bits,
				priv(dev)->main_iobase + HW_CONFIG_REG);
			/* clear DAC buffer to prevent weird interactions */
			writew(0,
				priv(dev)->main_iobase + DAC_BUFFER_CLEAR_REG);
			/* clear queue pointer */
			writew(0, priv(dev)->main_iobase + ADC_QUEUE_CLEAR_REG);
			/* load external queue */
			for (i = 0; i < cmd->chanlist_len; i++) {
				bits = 0;
				/* set channel */
				bits |= adc_chan_bits(CR_CHAN(cmd->
						chanlist[i]));
				/* set gain */
				bits |= ai_range_bits_6xxx(dev,
					CR_RANGE(cmd->chanlist[i]));
				/* set single-ended / differential */
				bits |= se_diff_bit_6xxx(dev,
					CR_AREF(cmd->chanlist[i]) == AREF_DIFF);
				if (CR_AREF(cmd->chanlist[i]) == AREF_COMMON)
					bits |= ADC_COMMON_BIT;
				/* mark end of queue */
				if (i == cmd->chanlist_len - 1)
					bits |= QUEUE_EOSCAN_BIT |
						QUEUE_EOSEQ_BIT;
				writew(bits,
					priv(dev)->main_iobase +
					ADC_QUEUE_FIFO_REG);
				DEBUG_PRINT
					("wrote 0x%x to external channel queue\n",
					bits);
			}
			/* doing a queue clear is not specified in board docs,
			 * but required for reliable operation */
			writew(0, priv(dev)->main_iobase + ADC_QUEUE_CLEAR_REG);
			/* prime queue holding register */
			writew(0, priv(dev)->main_iobase + ADC_QUEUE_LOAD_REG);
		}
	} else {
		/* 4020: ranges live in the slow i2c register; rewrite it
		 * only when the bits actually change */
		unsigned short old_cal_range_bits =
			priv(dev)->i2c_cal_range_bits;

		priv(dev)->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
		/* select BNC inputs */
		priv(dev)->i2c_cal_range_bits |= adc_src_4020_bits(4);
		/* select ranges */
		for (i = 0; i < cmd->chanlist_len; i++) {
			unsigned int channel = CR_CHAN(cmd->chanlist[i]);
			unsigned int range = CR_RANGE(cmd->chanlist[i]);

			if (range == 0)
				priv(dev)->i2c_cal_range_bits |=
					attenuate_bit(channel);
			else
				priv(dev)->i2c_cal_range_bits &=
					~attenuate_bit(channel);
		}
		/* update calibration/range i2c register only if necessary, as it is very slow */
		if (old_cal_range_bits != priv(dev)->i2c_cal_range_bits) {
			uint8_t i2c_data = priv(dev)->i2c_cal_range_bits;

			i2c_write(dev, RANGE_CAL_I2C_ADDR, &i2c_data,
				sizeof(i2c_data));
		}
	}
	return 0;
}

/* Point a plx dma channel at its first descriptor, zeroing the direct
 * transfer registers first (see comment below for why). */
static inline void load_first_dma_descriptor(struct comedi_device *dev,
	unsigned int dma_channel, unsigned int descriptor_bits)
{
	/* The transfer size, pci address, and local address registers
	 * are supposedly unused during chained dma,
	 * but I have found that left over values from last operation
	 * occasionally cause problems with transfer of first dma
	 * block.  Initializing them to zero seems to fix the problem. */
	if (dma_channel) {
		writel(0,
			priv(dev)->plx9080_iobase +
			PLX_DMA1_TRANSFER_SIZE_REG);
		writel(0, priv(dev)->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG);
		writel(0,
			priv(dev)->plx9080_iobase +
			PLX_DMA1_LOCAL_ADDRESS_REG);
		writel(descriptor_bits,
			priv(dev)->plx9080_iobase + PLX_DMA1_DESCRIPTOR_REG);
	} else {
		writel(0,
			priv(dev)->plx9080_iobase +
			PLX_DMA0_TRANSFER_SIZE_REG);
		writel(0, priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
		writel(0,
			priv(dev)->plx9080_iobase +
			PLX_DMA0_LOCAL_ADDRESS_REG);
		writel(descriptor_bits,
			priv(dev)->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
	}
}

/* Start an asynchronous analog input acquisition: program the channel
 * queue, pacing, counters and interrupts, arm dma channel 1 (unless in
 * pio/EOS mode on 6xxx), and trigger if start_src is TRIG_NOW. */
static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	uint32_t bits;
	unsigned int i;
	unsigned long flags;
	int retval;

	disable_ai_pacing(dev);
	abort_dma(dev, 1);
	retval = setup_channel_queue(dev, cmd);
	if (retval < 0)
		return retval;
	/* make sure internal calibration source is turned off */
	writew(0, priv(dev)->main_iobase + CALIBRATION_REG);
	set_ai_pacing(dev, cmd);
	setup_sample_counters(dev, cmd);
	enable_ai_interrupts(dev, cmd);
	spin_lock_irqsave(&dev->spinlock, flags);
	/* set mode, allow conversions through software gate */
	priv(dev)->adc_control1_bits |= ADC_SW_GATE_BIT;
	priv(dev)->adc_control1_bits &= ~ADC_DITHER_BIT;
	if (board(dev)->layout != LAYOUT_4020) {
		priv(dev)->adc_control1_bits &= ~ADC_MODE_MASK;
		if (cmd->convert_src == TRIG_EXT)
			priv(dev)->adc_control1_bits |= adc_mode_bits(13);	/* good old mode 13 */
		else
			priv(dev)->adc_control1_bits |= adc_mode_bits(8);	/* mode 8.  What else could you need? */
	} else {
		priv(dev)->adc_control1_bits &= ~CHANNEL_MODE_4020_MASK;
		if (cmd->chanlist_len == 4)
			priv(dev)->adc_control1_bits |= FOUR_CHANNEL_4020_BITS;
		else if (cmd->chanlist_len == 2)
			priv(dev)->adc_control1_bits |= TWO_CHANNEL_4020_BITS;
		priv(dev)->adc_control1_bits &= ~ADC_LO_CHANNEL_4020_MASK;
		priv(dev)->adc_control1_bits |=
			adc_lo_chan_4020_bits(CR_CHAN(cmd->chanlist[0]));
		priv(dev)->adc_control1_bits &= ~ADC_HI_CHANNEL_4020_MASK;
		priv(dev)->adc_control1_bits |=
			adc_hi_chan_4020_bits(CR_CHAN(cmd->
				chanlist[cmd->chanlist_len - 1]));
	}
	writew(priv(dev)->adc_control1_bits,
		priv(dev)->main_iobase + ADC_CONTROL1_REG);
	DEBUG_PRINT("control1 bits 0x%x\n", priv(dev)->adc_control1_bits);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* clear adc buffer */
	writew(0, priv(dev)->main_iobase + ADC_BUFFER_CLEAR_REG);
	if ((cmd->flags & TRIG_WAKE_EOS) == 0 ||
		board(dev)->layout == LAYOUT_4020) {
		priv(dev)->ai_dma_index = 0;
		/* set dma transfer size */
		for (i = 0; i < ai_dma_ring_count(board(dev)); i++)
			priv(dev)->ai_dma_desc[i].transfer_size =
				cpu_to_le32(dma_transfer_size(dev) *
					sizeof(uint16_t));
		/* give location of first dma descriptor */
		load_first_dma_descriptor(dev, 1,
			priv(dev)->ai_dma_desc_bus_addr |
			PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
			PLX_XFER_LOCAL_TO_PCI);
		dma_start_sync(dev, 1);
	}
	if (board(dev)->layout == LAYOUT_4020) {
		/* set source for external triggers */
		bits = 0;
		if (cmd->start_src == TRIG_EXT && CR_CHAN(cmd->start_arg))
			bits |= EXT_START_TRIG_BNC_BIT;
		if (cmd->stop_src == TRIG_EXT && CR_CHAN(cmd->stop_arg))
			bits |= EXT_STOP_TRIG_BNC_BIT;
		writew(bits, priv(dev)->main_iobase + DAQ_ATRIG_LOW_4020_REG);
	}
	spin_lock_irqsave(&dev->spinlock, flags);
	/* enable pacing, triggering, etc */
	bits = ADC_ENABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT;
	if (cmd->flags & TRIG_WAKE_EOS)
		bits |= ADC_DMA_DISABLE_BIT;
	/* set start trigger */
	if (cmd->start_src == TRIG_EXT) {
		bits |= ADC_START_TRIG_EXT_BITS;
		if (cmd->start_arg & CR_INVERT)
			bits |= ADC_START_TRIG_FALLING_BIT;
	} else if (cmd->start_src == TRIG_NOW)
		bits |= ADC_START_TRIG_SOFT_BITS;
	if (use_hw_sample_counter(cmd))
		bits |= ADC_SAMPLE_COUNTER_EN_BIT;
	writew(bits, priv(dev)->main_iobase + ADC_CONTROL0_REG);
	DEBUG_PRINT("control0 bits 0x%x\n", bits);
	priv(dev)->ai_cmd_running = 1;
	spin_unlock_irqrestore(&dev->spinlock, flags);
	/* start aquisition */
	if (cmd->start_src == TRIG_NOW) {
		writew(0, priv(dev)->main_iobase + ADC_START_REG);
		DEBUG_PRINT("soft trig\n");
	}
	return 0;
}

/* read num_samples from 16 bit wide ai fifo */
static void pio_drain_ai_fifo_16(struct comedi_device *dev)
{
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int i;
	uint16_t prepost_bits;
	int read_segment, read_index, write_segment, write_index;
	int num_samples;

	do {
		/* get least significant 15 bits */
		read_index = readw(priv(dev)->main_iobase +
			ADC_READ_PNTR_REG) & 0x7fff;
		write_index = readw(priv(dev)->main_iobase +
			ADC_WRITE_PNTR_REG) & 0x7fff;
		/* Get most significant bits (grey code).  Different boards use different code
		 * so use a scheme that doesn't depend on encoding.  This read must
		 * occur after reading least significant 15 bits to avoid race
		 * with fifo switching to next segment.
 */
static void pio_drain_ai_fifo_32(struct comedi_device *dev)
{
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int i;
	unsigned int max_transfer = 100000;	/* safety cap on samples per call */
	uint32_t fifo_data;
	int write_code =
		readw(priv(dev)->main_iobase + ADC_WRITE_PNTR_REG) & 0x7fff;
	int read_code =
		readw(priv(dev)->main_iobase + ADC_READ_PNTR_REG) & 0x7fff;

	if (cmd->stop_src == TRIG_COUNT) {
		if (max_transfer > priv(dev)->ai_count)
			max_transfer = priv(dev)->ai_count;
	}
	/* each 32-bit fifo read yields two 16-bit samples */
	for (i = 0; read_code != write_code && i < max_transfer;) {
		fifo_data = readl(priv(dev)->dio_counter_iobase + ADC_FIFO_REG);
		cfc_write_to_buffer(s, fifo_data & 0xffff);
		i++;
		if (i < max_transfer) {
			cfc_write_to_buffer(s, (fifo_data >> 16) & 0xffff);
			i++;
		}
		read_code = readw(priv(dev)->main_iobase +
			ADC_READ_PNTR_REG) & 0x7fff;
	}
	/* NOTE(review): decrements ai_count even when stop_src is not
	 * TRIG_COUNT -- appears harmless since ai_count is then unused,
	 * but confirm */
	priv(dev)->ai_count -= i;
}

/* empty fifo */
static void pio_drain_ai_fifo(struct comedi_device *dev)
{
	if (board(dev)->layout == LAYOUT_4020)
		pio_drain_ai_fifo_32(dev);
	else
		pio_drain_ai_fifo_16(dev);
}

/* Copy every completed ai dma buffer (channel 0 or 1) into the comedi
 * buffer, advancing ai_dma_index around the descriptor ring. */
static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
{
	struct comedi_async *async = dev->read_subdev->async;
	uint32_t next_transfer_addr;
	int j;
	int num_samples = 0;
	void __iomem *pci_addr_reg;

	if (channel)
		pci_addr_reg =
			priv(dev)->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
	else
		pci_addr_reg =
			priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
	/* loop until we have read all the full buffers */
	/* a buffer is "full" once the dma engine's current pci address has
	 * moved outside it; j caps the walk at one trip around the ring */
	for (j = 0, next_transfer_addr = readl(pci_addr_reg);
		(next_transfer_addr <
		priv(dev)->ai_buffer_bus_addr[priv(dev)->ai_dma_index] ||
		next_transfer_addr >=
		priv(dev)->ai_buffer_bus_addr[priv(dev)->ai_dma_index] +
		DMA_BUFFER_SIZE) && j < ai_dma_ring_count(board(dev)); j++) {
		/* transfer data from dma buffer to comedi buffer */
		num_samples = dma_transfer_size(dev);
		if (async->cmd.stop_src == TRIG_COUNT) {
			if (num_samples > priv(dev)->ai_count)
				num_samples = priv(dev)->ai_count;
			priv(dev)->ai_count -= num_samples;
		}
		cfc_write_array_to_buffer(dev->read_subdev,
			priv(dev)->ai_buffer[priv(dev)->ai_dma_index],
			num_samples * sizeof(uint16_t));
		priv(dev)->ai_dma_index = (priv(dev)->ai_dma_index + 1) %
			ai_dma_ring_count(board(dev));
		DEBUG_PRINT("next buffer addr 0x%lx\n",
			(unsigned long)priv(dev)->
			ai_buffer_bus_addr[priv(dev)->ai_dma_index]);
		DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr);
	}
	/* XXX check for dma ring buffer overrun (use end-of-chain bit to mark last
	 * unused buffer) */
}

/* Service analog-input-related interrupt causes: fifo overrun, dma1
 * completion, pio draining, and end-of-acquisition detection. */
static void handle_ai_interrupt(struct comedi_device *dev,
	unsigned short status, unsigned int plx_status)
{
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	uint8_t dma1_status;
	unsigned long flags;

	/* check for fifo overrun */
	if (status & ADC_OVERRUN_BIT) {
		comedi_error(dev, "fifo overrun");
		async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
	}
	/* spin lock makes sure noone else changes plx dma control reg */
	spin_lock_irqsave(&dev->spinlock, flags);
	dma1_status = readb(priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG);
	if (plx_status & ICS_DMA1_A) {	/* dma chan 1 interrupt */
		writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
			priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG);
		DEBUG_PRINT("dma1 status 0x%x\n", dma1_status);
		if (dma1_status & PLX_DMA_EN_BIT)
			drain_dma_buffers(dev, 1);
		DEBUG_PRINT(" cleared dma ch1 interrupt\n");
	}
	spin_unlock_irqrestore(&dev->spinlock, flags);
	if (status & ADC_DONE_BIT)
		DEBUG_PRINT("adc done interrupt\n");
	/* drain fifo with pio */
	if ((status & ADC_DONE_BIT) ||
		((cmd->flags & TRIG_WAKE_EOS) &&
		(status & ADC_INTR_PENDING_BIT) &&
		(board(dev)->layout != LAYOUT_4020))) {
		DEBUG_PRINT("pio fifo drain\n");
		/* only drain while the command is still running; lock is
		 * dropped before the (slow) drain itself */
		spin_lock_irqsave(&dev->spinlock, flags);
		if (priv(dev)->ai_cmd_running) {
			spin_unlock_irqrestore(&dev->spinlock, flags);
			pio_drain_ai_fifo(dev);
		} else
			spin_unlock_irqrestore(&dev->spinlock, flags);
	}
	/* if we are have all the data, then quit */
	if ((cmd->stop_src == TRIG_COUNT && priv(dev)->ai_count <= 0) ||
		(cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT))) {
		async->events |= COMEDI_CB_EOA;
	}
	cfc_handle_events(dev, s);
}

/* Index of the ao dma buffer loaded before the current one (ring wrap). */
static inline unsigned int prev_ao_dma_index(struct comedi_device *dev)
{
	unsigned int buffer_index;

	if (priv(dev)->ao_dma_index == 0)
		buffer_index = AO_DMA_RING_COUNT - 1;
	else
		buffer_index = priv(dev)->ao_dma_index - 1;
	return buffer_index;
}

/* Non-zero if dma channel 0 finished transferring the most recently
 * loaded ao buffer (done bit set and pci address parked on it). */
static int last_ao_dma_load_completed(struct comedi_device *dev)
{
	unsigned int buffer_index;
	unsigned int transfer_address;
	unsigned short dma_status;

	buffer_index = prev_ao_dma_index(dev);
	dma_status = readb(priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
	if ((dma_status & PLX_DMA_DONE_BIT) == 0)
		return 0;
	transfer_address =
		readl(priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
	if (transfer_address != priv(dev)->ao_buffer_bus_addr[buffer_index])
		return 0;
	return 1;
}

/* Non-zero if the ao command ended prematurely (buffer underrun) rather
 * than by completing its sample count. */
static int ao_stopped_by_error(struct comedi_device *dev,
	const struct comedi_cmd *cmd)
{
	if (cmd->stop_src == TRIG_NONE)
		return 1;
	if (cmd->stop_src == TRIG_COUNT) {
		if (priv(dev)->ao_count)
			return 1;
		if (last_ao_dma_load_completed(dev) == 0)
			return 1;
	}
	return 0;
}

/* Non-zero if ao dma hit end-of-chain while more data was still queued. */
static inline int ao_dma_needs_restart(struct comedi_device *dev,
	unsigned short dma_status)
{
	if ((dma_status & PLX_DMA_DONE_BIT) == 0 ||
		(dma_status & PLX_DMA_EN_BIT) == 0)
		return 0;
	if (last_ao_dma_load_completed(dev))
		return 0;
	return 1;
}

/* Re-arm ao dma after an end-of-chain stop by reloading the current
 * descriptor with the end-of-chain bit cleared. */
static void restart_ao_dma(struct comedi_device *dev)
{
	unsigned int dma_desc_bits;

	dma_desc_bits =
		readl(priv(dev)->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
	dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
	DEBUG_PRINT("restarting ao dma, descriptor reg 0x%x\n", dma_desc_bits);
	load_first_dma_descriptor(dev, 0, dma_desc_bits);
	dma_start_sync(dev, 0);
}

/* Service analog-output interrupt causes: dma0 completion (refill and
 * possibly restart the chain) and dac-done end-of-acquisition. */
static void handle_ao_interrupt(struct comedi_device *dev,
	unsigned short status, unsigned int plx_status)
{
	struct comedi_subdevice *s = dev->write_subdev;
	struct comedi_async *async;
	struct comedi_cmd *cmd;
	uint8_t dma0_status;
	unsigned long flags;

	/* board might not support ao, in which case write_subdev is NULL */
	if (s == NULL)
		return;
	async = s->async;
	cmd = &async->cmd;
	/* spin lock makes sure noone else changes plx dma control reg */
	spin_lock_irqsave(&dev->spinlock, flags);
	dma0_status = readb(priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
	if (plx_status & ICS_DMA0_A) {	/* dma chan 0 interrupt */
		if ((dma0_status & PLX_DMA_EN_BIT) &&
			!(dma0_status & PLX_DMA_DONE_BIT))
			writeb(PLX_DMA_EN_BIT | PLX_CLEAR_DMA_INTR_BIT,
				priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
		else
			writeb(PLX_CLEAR_DMA_INTR_BIT,
				priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
		spin_unlock_irqrestore(&dev->spinlock, flags);
		DEBUG_PRINT("dma0 status 0x%x\n", dma0_status);
		if (dma0_status & PLX_DMA_EN_BIT) {
			load_ao_dma(dev, cmd);
			/* try to recover from dma end-of-chain event */
			if (ao_dma_needs_restart(dev, dma0_status))
				restart_ao_dma(dev);
		}
		DEBUG_PRINT(" cleared dma ch0 interrupt\n");
	} else
		spin_unlock_irqrestore(&dev->spinlock, flags);

	if ((status & DAC_DONE_BIT)) {
		async->events |= COMEDI_CB_EOA;
		if (ao_stopped_by_error(dev, cmd))
			async->events |= COMEDI_CB_ERROR;
		DEBUG_PRINT("plx dma0 desc reg 0x%x\n",
			readl(priv(dev)->plx9080_iobase +
				PLX_DMA0_DESCRIPTOR_REG));
		DEBUG_PRINT("plx dma0 address reg 0x%x\n",
			readl(priv(dev)->plx9080_iobase +
				PLX_DMA0_PCI_ADDRESS_REG));
	}
	cfc_handle_events(dev, s);
}

/* Shared irq entry point: read status, bail if not fully attached yet,
 * then dispatch to the ai and ao handlers and ack plx doorbell bits. */
static irqreturn_t handle_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	unsigned short status;
	uint32_t plx_status;
	uint32_t plx_bits;

	plx_status = readl(priv(dev)->plx9080_iobase + PLX_INTRCS_REG);
	status = readw(priv(dev)->main_iobase + HW_STATUS_REG);
	DEBUG_PRINT("cb_pcidas64: hw status 0x%x ", status);
	DEBUG_PRINT("plx status 0x%x\n", plx_status);
	/* an interrupt before all the postconfig stuff gets done could
	 * cause a NULL dereference if we continue through the
	 * interrupt handler */
	if (dev->attached == 0) {
		/* NOTE(review): format string has no conversion for the
		 * trailing 'status' argument -- confirm intended */
		DEBUG_PRINT("cb_pcidas64: premature interrupt, ignoring",
			status);
		return IRQ_HANDLED;
	}
	handle_ai_interrupt(dev, status, plx_status);
	handle_ao_interrupt(dev, status, plx_status);
	/* clear possible plx9080 interrupt sources */
	if (plx_status & ICS_LDIA) {	/* clear local doorbell interrupt */
		plx_bits = readl(priv(dev)->plx9080_iobase + PLX_DBR_OUT_REG);
		writel(plx_bits, priv(dev)->plx9080_iobase + PLX_DBR_OUT_REG);
		DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits);
	}
	DEBUG_PRINT("exiting handler\n");
	return IRQ_HANDLED;
}

/* Abort a plx dma channel under the dma control/status spinlock. */
static void abort_dma(struct comedi_device *dev, unsigned int channel)
{
	unsigned long flags;

	/* spinlock for plx dma control/status reg */
	spin_lock_irqsave(&dev->spinlock, flags);
	plx9080_abort_dma(priv(dev)->plx9080_iobase, channel);
	spin_unlock_irqrestore(&dev->spinlock, flags);
}

/* Cancel a running ai command; no-op if none is running. */
static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);
	if (priv(dev)->ai_cmd_running == 0) {
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}
	priv(dev)->ai_cmd_running = 0;
	spin_unlock_irqrestore(&dev->spinlock, flags);
	disable_ai_pacing(dev);
	abort_dma(dev, 1);
	DEBUG_PRINT("ai canceled\n");
	return 0;
}

/* Analog output write instruction: set range, write one value to the
 * channel's dac register (split lsb/msb on 4020), cache it for readback. */
static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int range = CR_RANGE(insn->chanspec);

	/* do some initializing */
	writew(0, priv(dev)->main_iobase + DAC_CONTROL0_REG);
	/* set range */
	set_dac_range_bits(dev, &priv(dev)->dac_control1_bits, chan, range);
	writew(priv(dev)->dac_control1_bits,
		priv(dev)->main_iobase + DAC_CONTROL1_REG);
	/* write to channel */
	if (board(dev)->layout == LAYOUT_4020) {
		writew(data[0] & 0xff,
			priv(dev)->main_iobase + dac_lsb_4020_reg(chan));
		writew((data[0] >> 8) & 0xf,
			priv(dev)->main_iobase + dac_msb_4020_reg(chan));
	} else {
		writew(data[0], priv(dev)->main_iobase + dac_convert_reg(chan));
	}
	/* remember output value */
	priv(dev)->ao_value[chan] = data[0];
	return 1;
}

/* Analog output readback: return the last value written to the channel. */
static int ao_readback_insn(struct comedi_device *dev, struct
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = priv(dev)->ao_value[CR_CHAN(insn->chanspec)]; return 1; } static void set_dac_control0_reg(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int bits = DAC_ENABLE_BIT | WAVEFORM_GATE_LEVEL_BIT | WAVEFORM_GATE_ENABLE_BIT | WAVEFORM_GATE_SELECT_BIT; if (cmd->start_src == TRIG_EXT) { bits |= WAVEFORM_TRIG_EXT_BITS; if (cmd->start_arg & CR_INVERT) bits |= WAVEFORM_TRIG_FALLING_BIT; } else { bits |= WAVEFORM_TRIG_SOFT_BITS; } if (cmd->scan_begin_src == TRIG_EXT) { bits |= DAC_EXT_UPDATE_ENABLE_BIT; if (cmd->scan_begin_arg & CR_INVERT) bits |= DAC_EXT_UPDATE_FALLING_BIT; } writew(bits, priv(dev)->main_iobase + DAC_CONTROL0_REG); } static void set_dac_control1_reg(struct comedi_device *dev, const struct comedi_cmd *cmd) { int i; for (i = 0; i < cmd->chanlist_len; i++) { int channel, range; channel = CR_CHAN(cmd->chanlist[i]); range = CR_RANGE(cmd->chanlist[i]); set_dac_range_bits(dev, &priv(dev)->dac_control1_bits, channel, range); } priv(dev)->dac_control1_bits |= DAC_SW_GATE_BIT; writew(priv(dev)->dac_control1_bits, priv(dev)->main_iobase + DAC_CONTROL1_REG); } static void set_dac_select_reg(struct comedi_device *dev, const struct comedi_cmd *cmd) { uint16_t bits; unsigned int first_channel, last_channel; first_channel = CR_CHAN(cmd->chanlist[0]); last_channel = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]); if (last_channel < first_channel) comedi_error(dev, "bug! last ao channel < first ao channel"); bits = (first_channel & 0x7) | (last_channel & 0x7) << 3; writew(bits, priv(dev)->main_iobase + DAC_SELECT_REG); } static void set_dac_interval_regs(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int divisor; if (cmd->scan_begin_src != TRIG_TIMER) return; divisor = get_ao_divisor(cmd->scan_begin_arg, cmd->flags); if (divisor > max_counter_value) { comedi_error(dev, "bug! 
ao divisor too big"); divisor = max_counter_value; } writew(divisor & 0xffff, priv(dev)->main_iobase + DAC_SAMPLE_INTERVAL_LOWER_REG); writew((divisor >> 16) & 0xff, priv(dev)->main_iobase + DAC_SAMPLE_INTERVAL_UPPER_REG); } static unsigned int load_ao_dma_buffer(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int num_bytes, buffer_index, prev_buffer_index; unsigned int next_bits; buffer_index = priv(dev)->ao_dma_index; prev_buffer_index = prev_ao_dma_index(dev); DEBUG_PRINT("attempting to load ao buffer %i (0x%x)\n", buffer_index, priv(dev)->ao_buffer_bus_addr[buffer_index]); num_bytes = comedi_buf_read_n_available(dev->write_subdev->async); if (num_bytes > DMA_BUFFER_SIZE) num_bytes = DMA_BUFFER_SIZE; if (cmd->stop_src == TRIG_COUNT && num_bytes > priv(dev)->ao_count) num_bytes = priv(dev)->ao_count; num_bytes -= num_bytes % bytes_in_sample; if (num_bytes == 0) return 0; DEBUG_PRINT("loading %i bytes\n", num_bytes); num_bytes = cfc_read_array_from_buffer(dev->write_subdev, priv(dev)-> ao_buffer[buffer_index], num_bytes); priv(dev)->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(num_bytes); /* set end of chain bit so we catch underruns */ next_bits = le32_to_cpu(priv(dev)->ao_dma_desc[buffer_index].next); next_bits |= PLX_END_OF_CHAIN_BIT; priv(dev)->ao_dma_desc[buffer_index].next = cpu_to_le32(next_bits); /* clear end of chain bit on previous buffer now that we have set it * for the last buffer */ next_bits = le32_to_cpu(priv(dev)->ao_dma_desc[prev_buffer_index].next); next_bits &= ~PLX_END_OF_CHAIN_BIT; priv(dev)->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits); priv(dev)->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT; priv(dev)->ao_count -= num_bytes; return num_bytes; } static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int num_bytes; unsigned int next_transfer_addr; void __iomem *pci_addr_reg = priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG; unsigned int 
buffer_index; do { buffer_index = priv(dev)->ao_dma_index; /* don't overwrite data that hasn't been transferred yet */ next_transfer_addr = readl(pci_addr_reg); if (next_transfer_addr >= priv(dev)->ao_buffer_bus_addr[buffer_index] && next_transfer_addr < priv(dev)->ao_buffer_bus_addr[buffer_index] + DMA_BUFFER_SIZE) return; num_bytes = load_ao_dma_buffer(dev, cmd); } while (num_bytes >= DMA_BUFFER_SIZE); } static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int num_bytes; int i; /* clear queue pointer too, since external queue has * weird interactions with ao fifo */ writew(0, priv(dev)->main_iobase + ADC_QUEUE_CLEAR_REG); writew(0, priv(dev)->main_iobase + DAC_BUFFER_CLEAR_REG); num_bytes = (DAC_FIFO_SIZE / 2) * bytes_in_sample; if (cmd->stop_src == TRIG_COUNT && num_bytes / bytes_in_sample > priv(dev)->ao_count) num_bytes = priv(dev)->ao_count * bytes_in_sample; num_bytes = cfc_read_array_from_buffer(dev->write_subdev, priv(dev)->ao_bounce_buffer, num_bytes); for (i = 0; i < num_bytes / bytes_in_sample; i++) { writew(priv(dev)->ao_bounce_buffer[i], priv(dev)->main_iobase + DAC_FIFO_REG); } priv(dev)->ao_count -= num_bytes / bytes_in_sample; if (cmd->stop_src == TRIG_COUNT && priv(dev)->ao_count == 0) return 0; num_bytes = load_ao_dma_buffer(dev, cmd); if (num_bytes == 0) return -1; if (num_bytes >= DMA_BUFFER_SIZE) ; load_ao_dma(dev, cmd); dma_start_sync(dev, 0); return 0; } static inline int external_ai_queue_in_use(struct comedi_device *dev) { if (dev->read_subdev->busy) return 0; if (board(dev)->layout == LAYOUT_4020) return 0; else if (use_internal_queue_6xxx(&dev->read_subdev->async->cmd)) return 0; return 1; } static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; if (external_ai_queue_in_use(dev)) { warn_external_queue(dev); return -EBUSY; } /* disable analog output system during setup */ writew(0x0, priv(dev)->main_iobase + DAC_CONTROL0_REG); 
priv(dev)->ao_dma_index = 0; priv(dev)->ao_count = cmd->stop_arg * cmd->chanlist_len; set_dac_select_reg(dev, cmd); set_dac_interval_regs(dev, cmd); load_first_dma_descriptor(dev, 0, priv(dev)->ao_dma_desc_bus_addr | PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT); set_dac_control1_reg(dev, cmd); s->async->inttrig = ao_inttrig; return 0; } static int ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct comedi_cmd *cmd = &s->async->cmd; int retval; if (trig_num != 0) return -EINVAL; retval = prep_ao_dma(dev, cmd); if (retval < 0) return -EPIPE; set_dac_control0_reg(dev, cmd); if (cmd->start_src == TRIG_INT) writew(0, priv(dev)->main_iobase + DAC_START_REG); s->async->inttrig = NULL; return 0; } static int ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; unsigned int tmp_arg; int i; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_INT | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ /* uniqueness check */ if (cmd->start_src != TRIG_INT && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; /* compatibility check */ if (cmd->convert_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT) 
err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < board(dev)->ao_scan_speed) { cmd->scan_begin_arg = board(dev)->ao_scan_speed; err++; } if (get_ao_divisor(cmd->scan_begin_arg, cmd->flags) > max_counter_value) { cmd->scan_begin_arg = (max_counter_value + 2) * TIMER_BASE; err++; } } if (!cmd->chanlist_len) { cmd->chanlist_len = 1; err++; } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp_arg = cmd->scan_begin_arg; cmd->scan_begin_arg = get_divisor(cmd->scan_begin_arg, cmd->flags) * TIMER_BASE; if (tmp_arg != cmd->scan_begin_arg) err++; } if (err) return 4; if (cmd->chanlist) { unsigned int first_channel = CR_CHAN(cmd->chanlist[0]); for (i = 1; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) != first_channel + i) { comedi_error(dev, "chanlist must use consecutive channels"); err++; break; } } } if (err) return 5; return 0; } static int ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { writew(0x0, priv(dev)->main_iobase + DAC_CONTROL0_REG); abort_dma(dev, 0); return 0; } static int dio_callback(int dir, int port, int data, unsigned long arg) { void __iomem *iobase = (void __iomem *)arg; if (dir) { writeb(data, iobase + port); DEBUG_PRINT("wrote 0x%x to port %i\n", data, port); return 0; } else { return readb(iobase + port); } } static int dio_callback_4020(int dir, int port, int data, unsigned long arg) { void __iomem *iobase = (void __iomem *)arg; if (dir) { writew(data, iobase + 2 * port); return 0; } else { return readw(iobase + 2 * port); } } static int di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int bits; bits = readb(priv(dev)->dio_counter_iobase + DI_REG); bits &= 0xf; data[1] = bits; data[0] = 0; return 2; } static 
int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] &= 0xf; /* zero bits we are going to change */ s->state &= ~data[0]; /* set new bits */ s->state |= data[0] & data[1]; writeb(s->state, priv(dev)->dio_counter_iobase + DO_REG); data[1] = s->state; return 2; } static int dio_60xx_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; mask = 1 << CR_CHAN(insn->chanspec); switch (data[0]) { case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~mask; break; case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= mask; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT; return 2; default: return -EINVAL; } writeb(s->io_bits, priv(dev)->dio_counter_iobase + DIO_DIRECTION_60XX_REG); return 1; } static int dio_60xx_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); writeb(s->state, priv(dev)->dio_counter_iobase + DIO_DATA_60XX_REG); } data[1] = readb(priv(dev)->dio_counter_iobase + DIO_DATA_60XX_REG); return 2; } static void caldac_write(struct comedi_device *dev, unsigned int channel, unsigned int value) { priv(dev)->caldac_state[channel] = value; switch (board(dev)->layout) { case LAYOUT_60XX: case LAYOUT_64XX: caldac_8800_write(dev, channel, value); break; case LAYOUT_4020: caldac_i2c_write(dev, channel, value); break; default: break; } } static int calib_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel = CR_CHAN(insn->chanspec); /* return immediately if setting hasn't changed, since * programming these things is slow */ if (priv(dev)->caldac_state[channel] == data[0]) return 1; caldac_write(dev, channel, data[0]); return 1; } static int calib_read_insn(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int channel = CR_CHAN(insn->chanspec); data[0] = priv(dev)->caldac_state[channel]; return 1; } static void ad8402_write(struct comedi_device *dev, unsigned int channel, unsigned int value) { static const int bitstream_length = 10; unsigned int bit, register_bits; unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff); static const int ad8402_udelay = 1; priv(dev)->ad8402_state[channel] = value; register_bits = SELECT_8402_64XX_BIT; udelay(ad8402_udelay); writew(register_bits, priv(dev)->main_iobase + CALIBRATION_REG); for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) { if (bitstream & bit) register_bits |= SERIAL_DATA_IN_BIT; else register_bits &= ~SERIAL_DATA_IN_BIT; udelay(ad8402_udelay); writew(register_bits, priv(dev)->main_iobase + CALIBRATION_REG); udelay(ad8402_udelay); writew(register_bits | SERIAL_CLOCK_BIT, priv(dev)->main_iobase + CALIBRATION_REG); } udelay(ad8402_udelay); writew(0, priv(dev)->main_iobase + CALIBRATION_REG); } /* for pci-das6402/16, channel 0 is analog input gain and channel 1 is offset */ static int ad8402_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel = CR_CHAN(insn->chanspec); /* return immediately if setting hasn't changed, since * programming these things is slow */ if (priv(dev)->ad8402_state[channel] == data[0]) return 1; priv(dev)->ad8402_state[channel] = data[0]; ad8402_write(dev, channel, data[0]); return 1; } static int ad8402_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int channel = CR_CHAN(insn->chanspec); data[0] = priv(dev)->ad8402_state[channel]; return 1; } static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address) { static const int bitstream_length = 11; static const int read_command = 0x6; unsigned int bitstream = (read_command << 8) | address; unsigned int 
bit; void __iomem * const plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG; uint16_t value; static const int value_length = 16; static const int eeprom_udelay = 1; udelay(eeprom_udelay); priv(dev)->plx_control_bits &= ~CTL_EE_CLK & ~CTL_EE_CS; /* make sure we don't send anything to the i2c bus on 4020 */ priv(dev)->plx_control_bits |= CTL_USERO; writel(priv(dev)->plx_control_bits, plx_control_addr); /* activate serial eeprom */ udelay(eeprom_udelay); priv(dev)->plx_control_bits |= CTL_EE_CS; writel(priv(dev)->plx_control_bits, plx_control_addr); /* write read command and desired memory address */ for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) { /* set bit to be written */ udelay(eeprom_udelay); if (bitstream & bit) priv(dev)->plx_control_bits |= CTL_EE_W; else priv(dev)->plx_control_bits &= ~CTL_EE_W; writel(priv(dev)->plx_control_bits, plx_control_addr); /* clock in bit */ udelay(eeprom_udelay); priv(dev)->plx_control_bits |= CTL_EE_CLK; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(eeprom_udelay); priv(dev)->plx_control_bits &= ~CTL_EE_CLK; writel(priv(dev)->plx_control_bits, plx_control_addr); } /* read back value from eeprom memory location */ value = 0; for (bit = 1 << (value_length - 1); bit; bit >>= 1) { /* clock out bit */ udelay(eeprom_udelay); priv(dev)->plx_control_bits |= CTL_EE_CLK; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(eeprom_udelay); priv(dev)->plx_control_bits &= ~CTL_EE_CLK; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(eeprom_udelay); if (readl(plx_control_addr) & CTL_EE_R) value |= bit; } /* deactivate eeprom serial input */ udelay(eeprom_udelay); priv(dev)->plx_control_bits &= ~CTL_EE_CS; writel(priv(dev)->plx_control_bits, plx_control_addr); return value; } static int eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = read_eeprom(dev, CR_CHAN(insn->chanspec)); return 1; } /* utility 
function that rounds desired timing to an achievable time, and * sets cmd members appropriately. * adc paces conversions from master clock by dividing by (x + 3) where x is 24 bit number */ static void check_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd) { unsigned int convert_divisor = 0, scan_divisor; static const int min_convert_divisor = 3; static const int max_convert_divisor = max_counter_value + min_convert_divisor; static const int min_scan_divisor_4020 = 2; unsigned long long max_scan_divisor, min_scan_divisor; if (cmd->convert_src == TRIG_TIMER) { if (board(dev)->layout == LAYOUT_4020) { cmd->convert_arg = 0; } else { convert_divisor = get_divisor(cmd->convert_arg, cmd->flags); if (convert_divisor > max_convert_divisor) convert_divisor = max_convert_divisor; if (convert_divisor < min_convert_divisor) convert_divisor = min_convert_divisor; cmd->convert_arg = convert_divisor * TIMER_BASE; } } else if (cmd->convert_src == TRIG_NOW) cmd->convert_arg = 0; if (cmd->scan_begin_src == TRIG_TIMER) { scan_divisor = get_divisor(cmd->scan_begin_arg, cmd->flags); if (cmd->convert_src == TRIG_TIMER) { /* XXX check for integer overflows */ min_scan_divisor = convert_divisor * cmd->chanlist_len; max_scan_divisor = (convert_divisor * cmd->chanlist_len - 1) + max_counter_value; } else { min_scan_divisor = min_scan_divisor_4020; max_scan_divisor = max_counter_value + min_scan_divisor; } if (scan_divisor > max_scan_divisor) scan_divisor = max_scan_divisor; if (scan_divisor < min_scan_divisor) scan_divisor = min_scan_divisor; cmd->scan_begin_arg = scan_divisor * TIMER_BASE; } return; } /* Gets nearest achievable timing given master clock speed, does not * take into account possible minimum/maximum divisor values. Used * by other timing checking functions. 
*/ static unsigned int get_divisor(unsigned int ns, unsigned int flags) { unsigned int divisor; switch (flags & TRIG_ROUND_MASK) { case TRIG_ROUND_UP: divisor = (ns + TIMER_BASE - 1) / TIMER_BASE; break; case TRIG_ROUND_DOWN: divisor = ns / TIMER_BASE; break; case TRIG_ROUND_NEAREST: default: divisor = (ns + TIMER_BASE / 2) / TIMER_BASE; break; } return divisor; } static unsigned int get_ao_divisor(unsigned int ns, unsigned int flags) { return get_divisor(ns, flags) - 2; } /* adjusts the size of hardware fifo (which determines block size for dma xfers) */ static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples) { unsigned int num_fifo_entries; int retval; const struct hw_fifo_info *const fifo = board(dev)->ai_fifo; num_fifo_entries = num_samples / fifo->sample_packing_ratio; retval = set_ai_fifo_segment_length(dev, num_fifo_entries / fifo->num_segments); if (retval < 0) return retval; num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio; DEBUG_PRINT("set hardware fifo size to %i\n", num_samples); return num_samples; } /* query length of fifo */ static unsigned int ai_fifo_size(struct comedi_device *dev) { return priv(dev)->ai_fifo_segment_length * board(dev)->ai_fifo->num_segments * board(dev)->ai_fifo->sample_packing_ratio; } static int set_ai_fifo_segment_length(struct comedi_device *dev, unsigned int num_entries) { static const int increment_size = 0x100; const struct hw_fifo_info *const fifo = board(dev)->ai_fifo; unsigned int num_increments; uint16_t bits; if (num_entries < increment_size) num_entries = increment_size; if (num_entries > fifo->max_segment_length) num_entries = fifo->max_segment_length; /* 1 == 256 entries, 2 == 512 entries, etc */ num_increments = (num_entries + increment_size / 2) / increment_size; bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask; priv(dev)->fifo_size_bits &= ~fifo->fifo_size_reg_mask; priv(dev)->fifo_size_bits |= bits; writew(priv(dev)->fifo_size_bits, priv(dev)->main_iobase 
+ FIFO_SIZE_REG); priv(dev)->ai_fifo_segment_length = num_increments * increment_size; DEBUG_PRINT("set hardware fifo segment length to %i\n", priv(dev)->ai_fifo_segment_length); return priv(dev)->ai_fifo_segment_length; } /* pci-6025 8800 caldac: * address 0 == dac channel 0 offset * address 1 == dac channel 0 gain * address 2 == dac channel 1 offset * address 3 == dac channel 1 gain * address 4 == fine adc offset * address 5 == coarse adc offset * address 6 == coarse adc gain * address 7 == fine adc gain */ /* pci-6402/16 uses all 8 channels for dac: * address 0 == dac channel 0 fine gain * address 1 == dac channel 0 coarse gain * address 2 == dac channel 0 coarse offset * address 3 == dac channel 1 coarse offset * address 4 == dac channel 1 fine gain * address 5 == dac channel 1 coarse gain * address 6 == dac channel 0 fine offset * address 7 == dac channel 1 fine offset */ static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value) { static const int num_caldac_channels = 8; static const int bitstream_length = 11; unsigned int bitstream = ((address & 0x7) << 8) | value; unsigned int bit, register_bits; static const int caldac_8800_udelay = 1; if (address >= num_caldac_channels) { comedi_error(dev, "illegal caldac channel"); return -1; } for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) { register_bits = 0; if (bitstream & bit) register_bits |= SERIAL_DATA_IN_BIT; udelay(caldac_8800_udelay); writew(register_bits, priv(dev)->main_iobase + CALIBRATION_REG); register_bits |= SERIAL_CLOCK_BIT; udelay(caldac_8800_udelay); writew(register_bits, priv(dev)->main_iobase + CALIBRATION_REG); } udelay(caldac_8800_udelay); writew(SELECT_8800_BIT, priv(dev)->main_iobase + CALIBRATION_REG); udelay(caldac_8800_udelay); writew(0, priv(dev)->main_iobase + CALIBRATION_REG); udelay(caldac_8800_udelay); return 0; } /* 4020 caldacs */ static int caldac_i2c_write(struct comedi_device *dev, unsigned int caldac_channel, unsigned int value) { 
uint8_t serial_bytes[3]; uint8_t i2c_addr; enum pointer_bits { /* manual has gain and offset bits switched */ OFFSET_0_2 = 0x1, GAIN_0_2 = 0x2, OFFSET_1_3 = 0x4, GAIN_1_3 = 0x8, }; enum data_bits { NOT_CLEAR_REGISTERS = 0x20, }; switch (caldac_channel) { case 0: /* chan 0 offset */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = OFFSET_0_2; break; case 1: /* chan 1 offset */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = OFFSET_1_3; break; case 2: /* chan 2 offset */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = OFFSET_0_2; break; case 3: /* chan 3 offset */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = OFFSET_1_3; break; case 4: /* chan 0 gain */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = GAIN_0_2; break; case 5: /* chan 1 gain */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = GAIN_1_3; break; case 6: /* chan 2 gain */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = GAIN_0_2; break; case 7: /* chan 3 gain */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = GAIN_1_3; break; default: comedi_error(dev, "invalid caldac channel\n"); return -1; break; } serial_bytes[1] = NOT_CLEAR_REGISTERS | ((value >> 8) & 0xf); serial_bytes[2] = value & 0xff; i2c_write(dev, i2c_addr, serial_bytes, 3); return 0; } /* Their i2c requires a huge delay on setting clock or data high for some reason */ static const int i2c_high_udelay = 1000; static const int i2c_low_udelay = 10; /* set i2c data line high or low */ static void i2c_set_sda(struct comedi_device *dev, int state) { static const int data_bit = CTL_EE_W; void __iomem *plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG; if (state) { /* set data line high */ priv(dev)->plx_control_bits &= ~data_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_high_udelay); } else { /* set data line low */ priv(dev)->plx_control_bits |= data_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_low_udelay); } } /* set i2c clock line high or low */ static void i2c_set_scl(struct comedi_device *dev, 
int state) { static const int clock_bit = CTL_USERO; void __iomem *plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG; if (state) { /* set clock line high */ priv(dev)->plx_control_bits &= ~clock_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_high_udelay); } else { /* set clock line low */ priv(dev)->plx_control_bits |= clock_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_low_udelay); } } static void i2c_write_byte(struct comedi_device *dev, uint8_t byte) { uint8_t bit; unsigned int num_bits = 8; DEBUG_PRINT("writing to i2c byte 0x%x\n", byte); for (bit = 1 << (num_bits - 1); bit; bit >>= 1) { i2c_set_scl(dev, 0); if ((byte & bit)) i2c_set_sda(dev, 1); else i2c_set_sda(dev, 0); i2c_set_scl(dev, 1); } } /* we can't really read the lines, so fake it */ static int i2c_read_ack(struct comedi_device *dev) { i2c_set_scl(dev, 0); i2c_set_sda(dev, 1); i2c_set_scl(dev, 1); return 0; /* return fake acknowledge bit */ } /* send start bit */ static void i2c_start(struct comedi_device *dev) { i2c_set_scl(dev, 1); i2c_set_sda(dev, 1); i2c_set_sda(dev, 0); } /* send stop bit */ static void i2c_stop(struct comedi_device *dev) { i2c_set_scl(dev, 0); i2c_set_sda(dev, 0); i2c_set_scl(dev, 1); i2c_set_sda(dev, 1); } static void i2c_write(struct comedi_device *dev, unsigned int address, const uint8_t * data, unsigned int length) { unsigned int i; uint8_t bitstream; static const int read_bit = 0x1; /* XXX need mutex to prevent simultaneous attempts to access eeprom and i2c bus */ /* make sure we dont send anything to eeprom */ priv(dev)->plx_control_bits &= ~CTL_EE_CS; i2c_stop(dev); i2c_start(dev); /* send address and write bit */ bitstream = (address << 1) & ~read_bit; i2c_write_byte(dev, bitstream); /* get acknowledge */ if (i2c_read_ack(dev) != 0) { comedi_error(dev, "i2c write failed: no acknowledge"); i2c_stop(dev); return; } /* write data bytes */ for (i = 0; i < length; i++) { i2c_write_byte(dev, data[i]); if 
(i2c_read_ack(dev) != 0) { comedi_error(dev, "i2c write failed: no acknowledge"); i2c_stop(dev); return; } } i2c_stop(dev); }
gpl-2.0
blue236/linux-1
net/netfilter/ipset/ip_set_hash_ipportnet.c
761
15929
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module implementing an IP set type: the hash:ip,port,net type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 SCTP and UDPLITE support added */ /* 2 Range as input support for IPv4 added */ /* 3 nomatch flag support added */ /* 4 Counters support added */ /* 5 Comments support added */ /* 6 Forceadd support added */ #define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:ip,port,net"); /* Type specific function prefix */ #define HTYPE hash_ipportnet /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 * However this way we have to store internally cidr - 1, * dancing back and forth. 
*/

/* Options consumed by the generic ipset hash implementation pulled in
 * below via ip_set_hash_gen.h: elements carry a packed prefix length,
 * an L4 protocol field, and per-prefix-length bookkeeping ("nets").
 */
#define IP_SET_HASH_WITH_NETS_PACKED
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS

/* IPv4 variant */

/* Member elements.
 * Note: cidr stores the prefix length MINUS ONE (so 1..32 fits in 7 bits
 * alongside the nomatch flag); every user of the field adds/subtracts 1.
 */
struct hash_ipportnet4_elem {
	__be32 ip;	/* first address, network byte order */
	__be32 ip2;	/* second address, masked to the stored prefix */
	__be16 port;	/* port, network byte order */
	u8 cidr:7;	/* prefix length - 1 of ip2 */
	u8 nomatch:1;	/* set: element is a negative-match exception */
	u8 proto;	/* L4 protocol number */
};

/* Common functions */

/* Element equality: all keyed fields must match. */
static inline bool
hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
			   const struct hash_ipportnet4_elem *ip2,
			   u32 *multi)
{
	return ip1->ip == ip2->ip &&
	       ip1->ip2 == ip2->ip2 &&
	       ip1->cidr == ip2->cidr &&
	       ip1->port == ip2->port &&
	       ip1->proto == ip2->proto;
}

/* Positive match -> 1; nomatch element -> -ENOTEMPTY so the generic
 * code can report "matched an exception".
 */
static inline int
hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem)
{
	return elem->nomatch ? -ENOTEMPTY : 1;
}

/* Import the NOMATCH bit from the (flags << 16) encoding used by uadt. */
static inline void
hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags)
{
	elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}

/* Exchange the nomatch bit with the caller-held flags byte. */
static inline void
hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags)
{
	swap(*flags, elem->nomatch);
}

/* Apply a netmask to ip2 and record the (cidr - 1) encoding. */
static inline void
hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
{
	elem->ip2 &= ip_set_netmask(cidr);
	elem->cidr = cidr - 1;
}

/* Dump one element as netlink attributes; returns 0 on success,
 * 1 when the skb ran out of room.
 */
static bool
hash_ipportnet4_data_list(struct sk_buff *skb,
			  const struct hash_ipportnet4_elem *data)
{
	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;

	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
	    (flags &&
	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return 1;
}

/* Record resume point for interrupted listing/adding (see h->next use
 * in the uadt loops below).
 */
static inline void
hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next,
			  const struct hash_ipportnet4_elem *d)
{
	next->ip = d->ip;
	next->port = d->port;
	next->ip2 = d->ip2;
}

/* Instantiate the generic hash type for IPv4. */
#define MTYPE		hash_ipportnet4
#define PF		4
#define HOST_MASK	32
#include "ip_set_hash_gen.h"

/* Kernel-side add/del/test entry point: build an element from the skb
 * and dispatch to the variant's adt function.
 */
static int
hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
		     const struct xt_action_param *par,
		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
	const struct hash_ipportnet *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportnet4_elem e = {
		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
	};
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

	/* Testing always starts from the host prefix; the generic code
	 * walks the stored prefix lengths itself.
	 */
	if (adt == IPSET_TEST)
		e.cidr = HOST_MASK - 1;

	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
				 &e.port, &e.proto))
		return -EINVAL;

	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
	e.ip2 &= ip_set_netmask(e.cidr + 1);

	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}

/* Userspace add/del/test entry point: parse netlink attributes, then
 * either handle the single-element case or iterate the requested
 * ip x port x ip2 ranges, resuming from h->next after -EAGAIN retries.
 */
static int
hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct hash_ipportnet *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
	bool with_ports = false;
	u8 cidr;
	int ret;

	/* Mandatory attributes present and numeric ones in network order? */
	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
	      ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_CIDR2]) {
		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
		if (!cidr || cidr > HOST_MASK)
			return -IPSET_ERR_INVALID_CIDR;
		e.cidr = cidr - 1;
	}

	if (tb[IPSET_ATTR_PORT])
		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(e.proto);

		if (e.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	/* Port is only meaningful for port-carrying protocols and ICMP
	 * (where it encodes type/code); zero it otherwise.
	 */
	if (!(with_ports || e.proto == IPPROTO_ICMP))
		e.port = 0;

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

		/* Shifted into the upper 16 bits; data_set_flags() above
		 * shifts it back out.
		 */
		if (cadt_flags & IPSET_FLAG_NOMATCH)
			flags |= (IPSET_FLAG_NOMATCH << 16);
	}

	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
	/* Fast path: no range requested (or TEST, which never ranges). */
	if (adt == IPSET_TEST ||
	    !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
	      tb[IPSET_ATTR_IP2_TO])) {
		e.ip = htonl(ip);
		e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1));
		ret = adtfn(set, &e, &ext, &ext, flags);
		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
		       ip_set_eexist(ret, flags) ? 0 : ret;
	}

	ip_to = ip;
	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		if (ip > ip_to)
			swap(ip, ip_to);
	} else if (tb[IPSET_ATTR_CIDR]) {
		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (!cidr || cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(ip, ip_to, cidr);
	}

	port_to = port = ntohs(e.port);
	if (tb[IPSET_ATTR_PORT_TO]) {
		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
		if (port > port_to)
			swap(port, port_to);
	}

	ip2_to = ip2_from;
	if (tb[IPSET_ATTR_IP2_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
		if (ret)
			return ret;
		if (ip2_from > ip2_to)
			swap(ip2_from, ip2_to);
		/* NOTE(review): this rejects only the exact full range
		 * 0x00000000-0xffffffff (from + UINT_MAX wraps to to);
		 * presumably meant as a "range too big" guard - confirm
		 * against the generic hash range limits.
		 */
		if (ip2_from + UINT_MAX == ip2_to)
			return -IPSET_ERR_HASH_RANGE;
	} else
		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);

	/* On retry (resize raced with us), resume where we stopped. */
	if (retried)
		ip = ntohl(h->next.ip);
	for (; !before(ip_to, ip); ip++) {
		e.ip = htonl(ip);
		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
						       : port;
		for (; p <= port_to; p++) {
			e.port = htons(p);
			ip2 = retried &&
			      ip == ntohl(h->next.ip) &&
			      p == ntohs(h->next.port)
				? ntohl(h->next.ip2) : ip2_from;
			/* Cover [ip2, ip2_to] with the minimal set of
			 * CIDR blocks.
			 */
			while (!after(ip2, ip2_to)) {
				e.ip2 = htonl(ip2);
				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
								&cidr);
				e.cidr = cidr - 1;
				ret = adtfn(set, &e, &ext, &ext, flags);

				if (ret && !ip_set_eexist(ret, flags))
					return ret;
				else
					ret = 0;
				ip2 = ip2_last + 1;
			}
		}
	}
	return ret;
}

/* IPv6 variant */

/* Member elements; same layout rules as the IPv4 variant. */
struct hash_ipportnet6_elem {
	union nf_inet_addr ip;
	union nf_inet_addr ip2;
	__be16 port;
	u8 cidr:7;	/* prefix length - 1 of ip2 */
	u8 nomatch:1;
	u8 proto;
};

/* Common functions */

/* Element equality for IPv6 elements. */
static inline bool
hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
			   const struct hash_ipportnet6_elem *ip2,
			   u32 *multi)
{
	return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
	       ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) &&
	       ip1->cidr == ip2->cidr &&
	       ip1->port == ip2->port &&
	       ip1->proto == ip2->proto;
}

/* See hash_ipportnet4_do_data_match(). */
static inline int
hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem)
{
	return elem->nomatch ? -ENOTEMPTY : 1;
}

static inline void
hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags)
{
	elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}

static inline void
hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags)
{
	swap(*flags, elem->nomatch);
}

static inline void
hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
{
	ip6_netmask(&elem->ip2, cidr);
	elem->cidr = cidr - 1;
}

/* Dump one IPv6 element as netlink attributes. */
static bool
hash_ipportnet6_data_list(struct sk_buff *skb,
			  const struct hash_ipportnet6_elem *data)
{
	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;

	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
	    (flags &&
	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return 1;
}

/* Only the port can be ranged for IPv6 (see uadt below), so only the
 * port needs to be remembered for resume.  The next-element type is the
 * IPv4 one by the generic code's convention.
 */
static inline void
hash_ipportnet6_data_next(struct hash_ipportnet4_elem *next,
			  const struct hash_ipportnet6_elem *d)
{
	next->port = d->port;
}

/* Re-instantiate the generic hash type for IPv6; IP_SET_EMIT_CREATE
 * additionally emits the shared hash_ipportnet_create() used by the
 * type registration below.
 */
#undef MTYPE
#undef PF
#undef HOST_MASK

#define MTYPE		hash_ipportnet6
#define PF		6
#define HOST_MASK	128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"

/* Kernel-side add/del/test for the IPv6 variant. */
static int
hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
		     const struct xt_action_param *par,
		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
	const struct hash_ipportnet *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportnet6_elem e = {
		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
	};
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

	if (adt == IPSET_TEST)
		e.cidr = HOST_MASK - 1;

	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
				 &e.port, &e.proto))
		return -EINVAL;

	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
	ip6_netmask(&e.ip2, e.cidr + 1);

	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}

/* Userspace add/del/test for the IPv6 variant.  Unlike IPv4, address
 * ranges are not supported here - only a port range can be iterated.
 */
static int
hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct hash_ipportnet *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	u32 port, port_to;
	bool with_ports = false;
	u8 cidr;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
		     tb[IPSET_ATTR_IP_TO] ||
		     tb[IPSET_ATTR_CIDR]))
		return -IPSET_ERR_PROTOCOL;
	/* NOTE(review): unreachable - the check above already returns
	 * -IPSET_ERR_PROTOCOL when IPSET_ATTR_IP_TO is present, so the
	 * more specific HASH_RANGE_UNSUPPORTED error is never reported.
	 * Confirm against upstream, which drops IP_TO/CIDR from the
	 * protocol check so this branch can fire.
	 */
	if (unlikely(tb[IPSET_ATTR_IP_TO]))
		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
	      ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_CIDR2]) {
		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
		if (!cidr || cidr > HOST_MASK)
			return -IPSET_ERR_INVALID_CIDR;
		e.cidr = cidr - 1;
	}

	ip6_netmask(&e.ip2, e.cidr + 1);

	if (tb[IPSET_ATTR_PORT])
		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(e.proto);

		if (e.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
		e.port = 0;

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

		if (cadt_flags & IPSET_FLAG_NOMATCH)
			flags |= (IPSET_FLAG_NOMATCH << 16);
	}

	/* Single element: no port range requested (or TEST). */
	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
		ret = adtfn(set, &e, &ext, &ext, flags);
		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
		       ip_set_eexist(ret, flags) ? 0 : ret;
	}

	port = ntohs(e.port);
	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
	if (port > port_to)
		swap(port, port_to);

	if (retried)
		port = ntohs(h->next.port);
	for (; port <= port_to; port++) {
		e.port = htons(port);
		ret = adtfn(set, &e, &ext, &ext, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}

/* Registration of the hash:ip,port,net set type; the create/kadt/uadt
 * callbacks above are wired in by ip_set_hash_gen.h.
 */
static struct ip_set_type hash_ipportnet_type __read_mostly = {
	.name		= "hash:ip,port,net",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
			  IPSET_TYPE_NOMATCH,
	.dimension	= IPSET_DIM_THREE,
	.family		= NFPROTO_UNSPEC,
	.revision_min	= IPSET_TYPE_REV_MIN,
	.revision_max	= IPSET_TYPE_REV_MAX,
	.create		= hash_ipportnet_create,
	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
		[IPSET_ATTR_IP2_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_CIDR2]	= { .type = NLA_U8 },
		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
	},
	.me		= THIS_MODULE,
};

/* Module init: register the set type with the ipset core. */
static int __init
hash_ipportnet_init(void)
{
	return ip_set_type_register(&hash_ipportnet_type);
}

/* Module exit: unregister the set type. */
static void __exit
hash_ipportnet_fini(void)
{
	ip_set_type_unregister(&hash_ipportnet_type);
}

module_init(hash_ipportnet_init);
module_exit(hash_ipportnet_fini);
gpl-2.0
mikeNG/android_kernel_oneplus_msm8974
lib/nlattr.c
761
12724
/*
 * NETLINK	Netlink attributes
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ratelimit.h>
#include <net/netlink.h>

/* Minimum payload length for each attribute type with an implicit size;
 * used by validate_nla() and nla_policy_len() when the policy does not
 * specify an explicit length.
 */
static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
	[NLA_U8]	= sizeof(u8),
	[NLA_U16]	= sizeof(u16),
	[NLA_U32]	= sizeof(u32),
	[NLA_U64]	= sizeof(u64),
	[NLA_MSECS]	= sizeof(u64),
	[NLA_NESTED]	= NLA_HDRLEN,
};

/* Validate a single attribute against its policy entry.
 * Returns 0 if the attribute is acceptable (including unknown types,
 * which are ignored for forward compatibility), or a negative errno.
 */
static int validate_nla(const struct nlattr *nla, int maxtype,
			const struct nla_policy *policy)
{
	const struct nla_policy *pt;
	int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);

	/* Types outside the policy table are silently accepted. */
	if (type <= 0 || type > maxtype)
		return 0;

	pt = &policy[type];

	BUG_ON(pt->type > NLA_TYPE_MAX);

	switch (pt->type) {
	case NLA_FLAG:
		/* Flags carry no payload. */
		if (attrlen > 0)
			return -ERANGE;
		break;

	case NLA_NUL_STRING:
		if (pt->len)
			minlen = min_t(int, attrlen, pt->len + 1);
		else
			minlen = attrlen;

		/* Must contain a NUL terminator within the bound. */
		if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
			return -EINVAL;
		/* fall through */

	case NLA_STRING:
		if (attrlen < 1)
			return -ERANGE;

		if (pt->len) {
			char *buf = nla_data(nla);

			/* A trailing NUL does not count against pt->len. */
			if (buf[attrlen - 1] == '\0')
				attrlen--;

			if (attrlen > pt->len)
				return -ERANGE;
		}
		break;

	case NLA_BINARY:
		if (pt->len && attrlen > pt->len)
			return -ERANGE;
		break;

	case NLA_NESTED_COMPAT:
		/* Fixed header of pt->len bytes, optionally followed by an
		 * aligned nested attribute; validate both boundaries.
		 */
		if (attrlen < pt->len)
			return -ERANGE;
		if (attrlen < NLA_ALIGN(pt->len))
			break;
		if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
			return -ERANGE;
		nla = nla_data(nla) + NLA_ALIGN(pt->len);
		if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
			return -ERANGE;
		break;
	case NLA_NESTED:
		/* a nested attributes is allowed to be empty; if its not,
		 * it must have a size of at least NLA_HDRLEN.
		 */
		if (attrlen == 0)
			break;
		/* non-empty: fall through to the minlen check below */
	default:
		if (pt->len)
			minlen = pt->len;
		else if (pt->type != NLA_UNSPEC)
			minlen = nla_attr_minlen[pt->type];

		if (attrlen < minlen)
			return -ERANGE;
	}

	return 0;
}

/**
 * nla_validate - Validate a stream of attributes
 * @head: head of attribute stream
 * @len: length of attribute stream
 * @maxtype: maximum attribute type to be expected
 * @policy: validation policy
 *
 * Validates all attributes in the specified attribute stream against the
 * specified policy. Attributes with a type exceeding maxtype will be
 * ignored. See documentation of struct nla_policy for more details.
 *
 * Returns 0 on success or a negative error code.
 */
int nla_validate(const struct nlattr *head, int len, int maxtype,
		 const struct nla_policy *policy)
{
	const struct nlattr *nla;
	int rem, err;

	nla_for_each_attr(nla, head, len, rem) {
		err = validate_nla(nla, maxtype, policy);
		if (err < 0)
			goto errout;
	}

	err = 0;
errout:
	return err;
}

/**
 * nla_policy_len - Determine the max. length of a policy
 * @policy: policy to use
 * @n: number of policies
 *
 * Determines the max. length of the policy.  It is currently used
 * to allocate Netlink buffers roughly the size of the actual
 * message.
 *
 * Returns 0 on success or a negative error code.
 */
int nla_policy_len(const struct nla_policy *p, int n)
{
	int i, len = 0;

	for (i = 0; i < n; i++, p++) {
		if (p->len)
			len += nla_total_size(p->len);
		else if (nla_attr_minlen[p->type])
			len += nla_total_size(nla_attr_minlen[p->type]);
	}

	return len;
}

/**
 * nla_parse - Parse a stream of attributes into a tb buffer
 * @tb: destination array with maxtype+1 elements
 * @maxtype: maximum attribute type to be expected
 * @head: head of attribute stream
 * @len: length of attribute stream
 * @policy: validation policy
 *
 * Parses a stream of attributes and stores a pointer to each attribute in
 * the tb array accessible via the attribute type. Attributes with a type
 * exceeding maxtype will be silently ignored for backwards compatibility
 * reasons. policy may be set to NULL if no validation is required.
 *
 * Returns 0 on success or a negative error code.
 */
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
	      int len, const struct nla_policy *policy)
{
	const struct nlattr *nla;
	int rem, err;

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

	nla_for_each_attr(nla, head, len, rem) {
		u16 type = nla_type(nla);

		if (type > 0 && type <= maxtype) {
			if (policy) {
				err = validate_nla(nla, maxtype, policy);
				if (err < 0)
					goto errout;
			}

			/* Duplicates: last occurrence wins. */
			tb[type] = (struct nlattr *)nla;
		}
	}

	if (unlikely(rem > 0))
		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
				    rem, current->comm);

	err = 0;
errout:
	return err;
}

/**
 * nla_find - Find a specific attribute in a stream of attributes
 * @head: head of attribute stream
 * @len: length of attribute stream
 * @attrtype: type of attribute to look for
 *
 * Returns the first attribute in the stream matching the specified type.
 */
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
{
	const struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, head, len, rem)
		if (nla_type(nla) == attrtype)
			return (struct nlattr *)nla;

	return NULL;
}

/**
 * nla_strlcpy - Copy string attribute payload into a sized buffer
 * @dst: where to copy the string to
 * @nla: attribute to copy the string from
 * @dstsize: size of destination buffer
 *
 * Copies at most dstsize - 1 bytes into the destination buffer.
 * The result is always a valid NUL-terminated string. Unlike
 * strlcpy the destination buffer is always padded out.
 *
 * Returns the length of the source buffer.
 */
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
	size_t srclen = nla_len(nla);
	char *src = nla_data(nla);

	/* An existing NUL terminator is not part of the string length. */
	if (srclen > 0 && src[srclen - 1] == '\0')
		srclen--;

	if (dstsize > 0) {
		size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;

		/* memset first so the tail of dst is zero-padded. */
		memset(dst, 0, dstsize);
		memcpy(dst, src, len);
	}

	return srclen;
}

/**
 * nla_memcpy - Copy a netlink attribute into another memory area
 * @dest: where to copy to
 * @src: netlink attribute to copy from
 * @count: size of the destination area
 *
 * Note: The number of bytes copied is limited by the length of
 *       attribute's payload.
 *
 * Returns the number of bytes copied.
 */
int nla_memcpy(void *dest, const struct nlattr *src, int count)
{
	int minlen = min_t(int, count, nla_len(src));

	memcpy(dest, nla_data(src), minlen);

	return minlen;
}

/**
 * nla_memcmp - Compare an attribute with sized memory area
 * @nla: netlink attribute
 * @data: memory area
 * @size: size of memory area
 */
int nla_memcmp(const struct nlattr *nla, const void *data,
	       size_t size)
{
	/* Length difference decides first; contents only on equal size. */
	int d = nla_len(nla) - size;

	if (d == 0)
		d = memcmp(nla_data(nla), data, size);

	return d;
}

/**
 * nla_strcmp - Compare a string attribute against a string
 * @nla: netlink string attribute
 * @str: another string
 */
int nla_strcmp(const struct nlattr *nla, const char *str)
{
	int len = strlen(str);
	char *buf = nla_data(nla);
	int attrlen = nla_len(nla);
	int d;

	/* Ignore a trailing NUL in the attribute payload. */
	if (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;

	d = attrlen - len;
	if (d == 0)
		d = memcmp(nla_data(nla), str, len);

	return d;
}

#ifdef CONFIG_NET
/**
 * __nla_reserve - reserve room for attribute on the skb
 * @skb: socket buffer to reserve room on
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 *
 * Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it.
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the attribute header and payload.
 */
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
	struct nlattr *nla;

	nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
	nla->nla_type = attrtype;
	nla->nla_len = nla_attr_size(attrlen);

	/* Zero the alignment padding so no uninitialized skb memory
	 * leaks to userspace.
	 */
	memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));

	return nla;
}
EXPORT_SYMBOL(__nla_reserve);

/**
 * __nla_reserve_nohdr - reserve room for attribute without header
 * @skb: socket buffer to reserve room on
 * @attrlen: length of attribute payload
 *
 * Reserves room for attribute payload without a header.
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the payload.
 */
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
	void *start;

	start = skb_put(skb, NLA_ALIGN(attrlen));
	memset(start, 0, NLA_ALIGN(attrlen));

	return start;
}
EXPORT_SYMBOL(__nla_reserve_nohdr);

/**
 * nla_reserve - reserve room for attribute on the skb
 * @skb: socket buffer to reserve room on
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 *
 * Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it.
 *
 * Returns NULL if the tailroom of the skb is insufficient to store
 * the attribute header and payload.
 */
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
		return NULL;

	return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(nla_reserve);

/**
 * nla_reserve_nohdr - reserve room for attribute without header
 * @skb: socket buffer to reserve room on
 * @attrlen: length of attribute payload
 *
 * Reserves room for attribute payload without a header.
 *
 * Returns NULL if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return NULL;

	return __nla_reserve_nohdr(skb, attrlen);
}
EXPORT_SYMBOL(nla_reserve_nohdr);

/**
 * __nla_put - Add a netlink attribute to a socket buffer
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the attribute header and payload.
 */
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
	       const void *data)
{
	struct nlattr *nla;

	nla = __nla_reserve(skb, attrtype, attrlen);
	memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put);

/**
 * __nla_put_nohdr - Add a netlink attribute without header
 * @skb: socket buffer to add attribute to
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * The caller is responsible to ensure that the skb provides enough
 * tailroom for the attribute payload.
 */
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
	void *start;

	start = __nla_reserve_nohdr(skb, attrlen);
	memcpy(start, data, attrlen);
}
EXPORT_SYMBOL(__nla_put_nohdr);

/**
 * nla_put - Add a netlink attribute to a socket buffer
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute header and payload.
 */
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
		return -EMSGSIZE;

	__nla_put(skb, attrtype, attrlen, data);
	return 0;
}
EXPORT_SYMBOL(nla_put);

/**
 * nla_put_nohdr - Add a netlink attribute without header
 * @skb: socket buffer to add attribute to
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return -EMSGSIZE;

	__nla_put_nohdr(skb, attrlen, data);
	return 0;
}
EXPORT_SYMBOL(nla_put_nohdr);

/**
 * nla_append - Add a netlink attribute without header or padding
 * @skb: socket buffer to add attribute to
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
int nla_append(struct sk_buff *skb, int attrlen, const void *data)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return -EMSGSIZE;

	memcpy(skb_put(skb, attrlen), data, attrlen);
	return 0;
}
EXPORT_SYMBOL(nla_append);
#endif

EXPORT_SYMBOL(nla_validate);
EXPORT_SYMBOL(nla_policy_len);
EXPORT_SYMBOL(nla_parse);
EXPORT_SYMBOL(nla_find);
EXPORT_SYMBOL(nla_strlcpy);
EXPORT_SYMBOL(nla_memcpy);
EXPORT_SYMBOL(nla_memcmp);
EXPORT_SYMBOL(nla_strcmp);
gpl-2.0
cowithgun/samsung-kernel-ancora
net/bridge/br_notify.c
761
2430
/*
 *	Device event handling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

#include "br_private.h"

static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);

/* Registered with the netdevice notifier chain elsewhere in the bridge
 * module (see br_init in upstream; registration is outside this file's
 * visible scope).
 */
struct notifier_block br_device_notifier = {
	.notifier_call = br_device_event
};

/*
 * Handle changes in state of network devices enslaved to a bridge.
 *
 * Note: don't care about up/down if bridge itself is down, because
 *     port state is checked when bridge is brought up.
 */
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net_bridge_port *p = dev->br_port;
	struct net_bridge *br;
	int err;

	/* not a port of a bridge */
	if (p == NULL)
		return NOTIFY_DONE;

	br = p->br;

	switch (event) {
	case NETDEV_CHANGEMTU:
		/* Bridge MTU tracks the smallest member MTU. */
		dev_set_mtu(br->dev, br_min_mtu(br));
		break;

	case NETDEV_CHANGEADDR:
		/* Port MAC changed: update the FDB and possibly the
		 * bridge id, under the bridge lock.
		 */
		spin_lock_bh(&br->lock);
		br_fdb_changeaddr(p, dev->dev_addr);
		br_stp_recalculate_bridge_id(br);
		spin_unlock_bh(&br->lock);
		break;

	case NETDEV_CHANGE:
		br_port_carrier_check(p);
		break;

	case NETDEV_FEAT_CHANGE:
		spin_lock_bh(&br->lock);
		if (netif_running(br->dev))
			br_features_recompute(br);
		spin_unlock_bh(&br->lock);
		break;

	case NETDEV_DOWN:
		spin_lock_bh(&br->lock);
		if (br->dev->flags & IFF_UP)
			br_stp_disable_port(p);
		spin_unlock_bh(&br->lock);
		break;

	case NETDEV_UP:
		/* Only enable the port if it has carrier and the bridge
		 * itself is up.
		 */
		if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP)) {
			spin_lock_bh(&br->lock);
			br_stp_enable_port(p);
			spin_unlock_bh(&br->lock);
		}
		break;

	case NETDEV_UNREGISTER:
		br_del_if(br, dev);
		break;

	case NETDEV_CHANGENAME:
		err = br_sysfs_renameif(p);
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		return NOTIFY_BAD;
	}

	/* Events that may cause spanning tree to refresh */
	if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
	    event == NETDEV_CHANGE || event == NETDEV_DOWN)
		br_ifinfo_notify(RTM_NEWLINK, p);

	return NOTIFY_DONE;
}
gpl-2.0
TDR/Epic4GTouch-Kernel
drivers/char/agp/efficeon-agp.c
761
12657
/* * Transmeta's Efficeon AGPGART driver. * * Based upon a diff by Linus around November '02. * * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com> * and H. Peter Anvin <hpa@transmeta.com>. */ /* * NOTE-cpg-040217: * * - when compiled as a module, after loading the module, * it will refuse to unload, indicating it is in use, * when it is not. * - no s3 (suspend to ram) testing. * - tested on the efficeon integrated nothbridge for tens * of iterations of starting x and glxgears. * - tested with radeon 9000 and radeon mobility m9 cards * - tested with c3/c4 enabled (with the mobility m9 card) */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/gfp.h> #include <linux/page-flags.h> #include <linux/mm.h> #include "agp.h" #include "intel-agp.h" /* * The real differences to the generic AGP code is * in the GART mappings - a two-level setup with the * first level being an on-chip 64-entry table. * * The page array is filled through the ATTPAGE register * (Aperture Translation Table Page Register) at 0xB8. Bits: * 31:20: physical page address * 11:9: Page Attribute Table Index (PATI) * must match the PAT index for the * mapped pages (the 2nd level page table pages * themselves should be just regular WB-cacheable, * so this is normally zero.) * 8: Present * 7:6: reserved, write as zero * 5:0: GATT directory index: which 1st-level entry * * The Efficeon AGP spec requires pages to be WB-cacheable * but to be explicitly CLFLUSH'd after any changes. */ #define EFFICEON_ATTPAGE 0xb8 #define EFFICEON_L1_SIZE 64 /* Number of PDE pages */ #define EFFICEON_PATI (0 << 9) #define EFFICEON_PRESENT (1 << 8) static struct _efficeon_private { unsigned long l1_table[EFFICEON_L1_SIZE]; } efficeon_private; static const struct gatt_mask efficeon_generic_masks[] = { {.mask = 0x00000001, .type = 0} }; /* This function does the same thing as mask_memory() for this chipset... 
*/ static inline unsigned long efficeon_mask_memory(struct page *page) { unsigned long addr = page_to_phys(page); return addr | 0x00000001; } static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] = { {256, 65536, 0}, {128, 32768, 32}, {64, 16384, 48}, {32, 8192, 56} }; /* * Control interfaces are largely identical to * the legacy Intel 440BX.. */ static int efficeon_fetch_size(void) { int i; u16 temp; struct aper_size_info_lvl2 *values; pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void efficeon_tlbflush(struct agp_memory * mem) { printk(KERN_DEBUG PFX "efficeon_tlbflush()\n"); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); } static void efficeon_cleanup(void) { u16 temp; struct aper_size_info_lvl2 *previous_size; printk(KERN_DEBUG PFX "efficeon_cleanup()\n"); previous_size = A_SIZE_LVL2(agp_bridge->previous_size); pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } static int efficeon_configure(void) { u32 temp; u16 temp2; struct aper_size_info_lvl2 *current_size; printk(KERN_DEBUG PFX "efficeon_configure()\n"); current_size = A_SIZE_LVL2(agp_bridge->current_size); /* aperture size */ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); /* 
paccfg/nbxcfg */ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11)); /* clear any possible error conditions */ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); return 0; } static int efficeon_free_gatt_table(struct agp_bridge_data *bridge) { int index, freed = 0; for (index = 0; index < EFFICEON_L1_SIZE; index++) { unsigned long page = efficeon_private.l1_table[index]; if (page) { efficeon_private.l1_table[index] = 0; ClearPageReserved(virt_to_page((char *)page)); free_page(page); freed++; } printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n", agp_bridge->dev, EFFICEON_ATTPAGE, index); pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, index); } printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed); return 0; } /* * Since we don't need contiguous memory we just try * to get the gatt table once */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #undef GET_GATT #define GET_GATT(addr) (efficeon_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) { int index; const int pati = EFFICEON_PATI; const int present = EFFICEON_PRESENT; const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; int num_entries, l1_pages; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries); /* There are 2^10 PTE pages per PDE page */ BUG_ON(num_entries & 0x3ff); l1_pages = num_entries >> 10; for (index = 0 ; index < l1_pages ; index++) { int offset; unsigned long page; unsigned long value; page = efficeon_private.l1_table[index]; BUG_ON(page); page = get_zeroed_page(GFP_KERNEL); if (!page) { 
efficeon_free_gatt_table(agp_bridge); return -ENOMEM; } SetPageReserved(virt_to_page((char *)page)); for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) clflush((char *)page+offset); efficeon_private.l1_table[index] = page; value = virt_to_phys((unsigned long *)page) | pati | present | index; pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, value); } return 0; } static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, count = mem->page_count, num_entries; unsigned int *page, *last_page; const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; const unsigned long clflush_mask = ~(clflush_chunk-1); printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count); num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; if (type != 0 || mem->type != 0) return -EINVAL; if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } last_page = NULL; for (i = 0; i < count; i++) { int index = pg_start + i; unsigned long insert = efficeon_mask_memory(mem->pages[i]); page = (unsigned int *) efficeon_private.l1_table[index >> 10]; if (!page) continue; page += (index & 0x3ff); *page = insert; /* clflush is slow, so don't clflush until we have to */ if (last_page && (((unsigned long)page^(unsigned long)last_page) & clflush_mask)) clflush(last_page); last_page = page; } if ( last_page ) clflush(last_page); agp_bridge->driver->tlb_flush(mem); return 0; } static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, count = mem->page_count, num_entries; printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count); num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; if (type != 0 || mem->type != 0) return -EINVAL; for (i = 0; i < count; i++) { int index = pg_start + i; unsigned int *page = (unsigned int *) 
efficeon_private.l1_table[index >> 10]; if (!page) continue; page += (index & 0x3ff); *page = 0; } agp_bridge->driver->tlb_flush(mem); return 0; } static const struct agp_bridge_driver efficeon_driver = { .owner = THIS_MODULE, .aperture_sizes = efficeon_generic_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 4, .configure = efficeon_configure, .fetch_size = efficeon_fetch_size, .cleanup = efficeon_cleanup, .tlb_flush = efficeon_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = efficeon_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, // Efficeon-specific GATT table setup / populate / teardown .create_gatt_table = efficeon_create_gatt_table, .free_gatt_table = efficeon_free_gatt_table, .insert_memory = efficeon_insert_memory, .remove_memory = efficeon_remove_memory, .cant_use_aperture = false, // true might be faster? // Generic .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static int __devinit agp_efficeon_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; struct resource *r; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* Probe for Efficeon controller */ if (pdev->device != PCI_DEVICE_ID_EFFICEON) { printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n", pdev->device); return -ENODEV; } printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n"); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &efficeon_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. 
 * 20030610 - hamish@zot.org
 */
	r = &pdev->resource[0];
	if (!r->start && r->end) {
		/* BIOS left BAR0 unassigned: ask the PCI core for a
		 * window before touching the device. */
		if (pci_assign_resource(pdev, 0)) {
			printk(KERN_ERR PFX "could not assign resource 0\n");
			agp_put_bridge(bridge);
			return -ENODEV;
		}
	}

	/*
	 * If the device has not been properly setup, the following will catch
	 * the problem and should stop the system from crashing.
	 * 20030610 - hamish@zot.org
	 */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR PFX "Unable to Enable PCI device\n");
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	/* Fill in the mode register */
	if (cap_ptr) {
		pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
	}

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

/* Undo probe: unregister the bridge and drop our reference. */
static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

#ifdef CONFIG_PM
/* Nothing to save on suspend: the GATT lives in normal RAM and the
 * chipset registers are fully reprogrammed on resume. */
static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
{
	return 0;
}

/* Re-run the full chipset configuration after resume. */
static int agp_efficeon_resume(struct pci_dev *pdev)
{
	printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
	return efficeon_configure();
}
#endif

/* Match any Transmeta host bridge; agp_efficeon_probe() rejects
 * anything that is not the Efficeon device id. */
static struct pci_device_id agp_efficeon_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_TRANSMETA,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);

static struct pci_driver agp_efficeon_pci_driver = {
	.name		= "agpgart-efficeon",
	.id_table	= agp_efficeon_pci_table,
	.probe		= agp_efficeon_probe,
	.remove		= agp_efficeon_remove,
#ifdef CONFIG_PM
	.suspend	= agp_efficeon_suspend,
	.resume		= agp_efficeon_resume,
#endif
};

/* Module init: honour the global agp=off parameter and guard against
 * double registration with a static flag. */
static int __init agp_efficeon_init(void)
{
	static int agp_initialised=0;

	if (agp_off)
		return -EINVAL;

	if (agp_initialised == 1)
		return 0;
	agp_initialised=1;

	return pci_register_driver(&agp_efficeon_pci_driver);
}

static void __exit agp_efficeon_cleanup(void)
{
	pci_unregister_driver(&agp_efficeon_pci_driver);
}
/* Module entry/exit points. */
module_init(agp_efficeon_init);
module_exit(agp_efficeon_cleanup);

MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
MODULE_LICENSE("GPL and additional rights");
gpl-2.0
columbia/linux-2.6-mutable
drivers/scsi/mpt2sas/mpt2sas_config.c
761
46368
/* * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c * Copyright (C) 2007-2010 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include <linux/version.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/slab.h> #include "mpt2sas_base.h" /* local definitions */ /* Timeout for config page request (in seconds) */ #define MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT 15 /* Common sgl flags for READING a config page. */ #define MPT2_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT) /* Common sgl flags for WRITING a config page. 
*/ #define MPT2_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \ << MPI2_SGE_FLAGS_SHIFT) /** * struct config_request - obtain dma memory via routine * @sz: size * @page: virt pointer * @page_dma: phys pointer * */ struct config_request{ u16 sz; void *page; dma_addr_t page_dma; }; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _config_display_some_debug - debug routine * @ioc: per adapter object * @smid: system request message index * @calling_function_name: string pass from calling function * @mpi_reply: reply message frame * Context: none. * * Function for displaying debug info helpfull when debugging issues * in this module. */ static void _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid, char *calling_function_name, MPI2DefaultReply_t *mpi_reply) { Mpi2ConfigRequest_t *mpi_request; char *desc = NULL; if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) return; mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { case MPI2_CONFIG_PAGETYPE_IO_UNIT: desc = "io_unit"; break; case MPI2_CONFIG_PAGETYPE_IOC: desc = "ioc"; break; case MPI2_CONFIG_PAGETYPE_BIOS: desc = "bios"; break; case MPI2_CONFIG_PAGETYPE_RAID_VOLUME: desc = "raid_volume"; break; case MPI2_CONFIG_PAGETYPE_MANUFACTURING: desc = "manufaucturing"; break; case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK: desc = "physdisk"; break; case MPI2_CONFIG_PAGETYPE_EXTENDED: switch (mpi_request->ExtPageType) { case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: desc = "sas_io_unit"; break; case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER: desc = "sas_expander"; break; case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE: desc = "sas_device"; break; case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY: desc = "sas_phy"; break; case MPI2_CONFIG_EXTPAGETYPE_LOG: desc = "log"; break; case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE: desc = "enclosure"; break; case 
MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG: desc = "raid_config"; break; case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: desc = "driver_mappping"; break; } break; } if (!desc) return; printk(MPT2SAS_DEBUG_FMT "%s: %s(%d), action(%d), form(0x%08x), " "smid(%d)\n", ioc->name, calling_function_name, desc, mpi_request->Header.PageNumber, mpi_request->Action, le32_to_cpu(mpi_request->PageAddress), smid); if (!mpi_reply) return; if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) printk(MPT2SAS_DEBUG_FMT "\tiocstatus(0x%04x), loginfo(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo)); } #endif /** * _config_alloc_config_dma_memory - obtain physical memory * @ioc: per adapter object * @mem: struct config_request * * A wrapper for obtaining dma-able memory for config page request. * * Returns 0 for success, non-zero for failure. */ static int _config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, struct config_request *mem) { int r = 0; if (mem->sz > ioc->config_page_sz) { mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, &mem->page_dma, GFP_KERNEL); if (!mem->page) { printk(MPT2SAS_ERR_FMT "%s: dma_alloc_coherent" " failed asking for (%d) bytes!!\n", ioc->name, __func__, mem->sz); r = -ENOMEM; } } else { /* use tmp buffer if less than 512 bytes */ mem->page = ioc->config_page; mem->page_dma = ioc->config_page_dma; } return r; } /** * _config_free_config_dma_memory - wrapper to free the memory * @ioc: per adapter object * @mem: struct config_request * * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. * * Returns 0 for success, non-zero for failure. 
*/ static void _config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, struct config_request *mem) { if (mem->sz > ioc->config_page_sz) dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, mem->page_dma); } /** * mpt2sas_config_done - config page completion routine * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: none. * * The callback handler when using _config_request. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. */ u8 mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { MPI2DefaultReply_t *mpi_reply; if (ioc->config_cmds.status == MPT2_CMD_NOT_USED) return 1; if (ioc->config_cmds.smid != smid) return 1; ioc->config_cmds.status |= MPT2_CMD_COMPLETE; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (mpi_reply) { ioc->config_cmds.status |= MPT2_CMD_REPLY_VALID; memcpy(ioc->config_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); } ioc->config_cmds.status &= ~MPT2_CMD_PENDING; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _config_display_some_debug(ioc, smid, "config_done", mpi_reply); #endif ioc->config_cmds.smid = USHRT_MAX; complete(&ioc->config_cmds.done); return 1; } /** * _config_request - main routine for sending config page requests * @ioc: per adapter object * @mpi_request: request message frame * @mpi_reply: reply mf payload returned from firmware * @timeout: timeout in seconds * @config_page: contents of the config page * @config_page_sz: size of config page * Context: sleep * * A generic API for config page requests to firmware. * * The ioc->config_cmds.status flag should be MPT2_CMD_NOT_USED before calling * this API. * * The callback index is set inside `ioc->config_cb_idx. * * Returns 0 for success, non-zero for failure. 
*/ static int _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout, void *config_page, u16 config_page_sz) { u16 smid; u32 ioc_state; unsigned long timeleft; Mpi2ConfigRequest_t *config_request; int r; u8 retry_count, issue_host_reset = 0; u16 wait_state_count; struct config_request mem; mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n", ioc->name, __func__); mutex_unlock(&ioc->config_cmds.mutex); return -EAGAIN; } retry_count = 0; memset(&mem, 0, sizeof(struct config_request)); mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; if (config_page) { mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; mpi_request->Header.PageType = mpi_reply->Header.PageType; mpi_request->Header.PageLength = mpi_reply->Header.PageLength; mpi_request->ExtPageLength = mpi_reply->ExtPageLength; mpi_request->ExtPageType = mpi_reply->ExtPageType; if (mpi_request->Header.PageLength) mem.sz = mpi_request->Header.PageLength * 4; else mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; r = _config_alloc_config_dma_memory(ioc, &mem); if (r != 0) goto out; if (mpi_request->Action == MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT || mpi_request->Action == MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) { ioc->base_add_sg_single(&mpi_request->PageBufferSGE, MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, mem.page_dma); memcpy(mem.page, config_page, min_t(u16, mem.sz, config_page_sz)); } else { memset(config_page, 0, config_page_sz); ioc->base_add_sg_single(&mpi_request->PageBufferSGE, MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma); } } retry_config: if (retry_count) { if (retry_count > 2) { /* attempt only 2 retries */ r = -EFAULT; goto free_mem; } printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n", ioc->name, __func__, retry_count); } wait_state_count = 0; ioc_state = 
mpt2sas_base_get_iocstate(ioc, 1); while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { if (wait_state_count++ == MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT) { printk(MPT2SAS_ERR_FMT "%s: failed due to ioc not operational\n", ioc->name, __func__); ioc->config_cmds.status = MPT2_CMD_NOT_USED; r = -EFAULT; goto free_mem; } ssleep(1); ioc_state = mpt2sas_base_get_iocstate(ioc, 1); printk(MPT2SAS_INFO_FMT "%s: waiting for " "operational state(count=%d)\n", ioc->name, __func__, wait_state_count); } if (wait_state_count) printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n", ioc->name, __func__); smid = mpt2sas_base_get_smid(ioc, ioc->config_cb_idx); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); ioc->config_cmds.status = MPT2_CMD_NOT_USED; r = -EAGAIN; goto free_mem; } r = 0; memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t)); ioc->config_cmds.status = MPT2_CMD_PENDING; config_request = mpt2sas_base_get_msg_frame(ioc, smid); ioc->config_cmds.smid = smid; memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _config_display_some_debug(ioc, smid, "config_request", NULL); #endif init_completion(&ioc->config_cmds.done); mpt2sas_base_put_smid_default(ioc, smid); timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) { printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2ConfigRequest_t)/4); retry_count++; if (ioc->config_cmds.smid == smid) mpt2sas_base_free_smid(ioc, smid); if ((ioc->shost_recovery) || (ioc->config_cmds.status & MPT2_CMD_RESET)) goto retry_config; issue_host_reset = 1; r = -EFAULT; goto free_mem; } if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) memcpy(mpi_reply, ioc->config_cmds.reply, sizeof(Mpi2ConfigReply_t)); if (retry_count) printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n", ioc->name, __func__, retry_count); if (config_page && mpi_request->Action == 
MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) memcpy(config_page, mem.page, min_t(u16, mem.sz, config_page_sz)); free_mem: if (config_page) _config_free_config_dma_memory(ioc, &mem); out: ioc->config_cmds.status = MPT2_CMD_NOT_USED; mutex_unlock(&ioc->config_cmds.mutex); if (issue_host_reset) mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); return r; } /** * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. */ int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
 */
int
mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	/* Step 1: PAGE_HEADER action with a zero-length SGE so
	 * _config_request() learns the page length/version needed for
	 * the follow-up read. */
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	mpi_request.Header.PageNumber = 10;
	/* NOTE(review): page 10 is requested with the page-0 version
	 * constant (MPI2_MANUFACTURING0_PAGEVERSION); confirm against the
	 * MPI headers whether MPI2_MANUFACTURING10_PAGEVERSION exists and
	 * should be used here instead. */
	mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Step 2: read the current page contents into config_page. */
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_bios_pg2 - obtain bios page 2
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
*/ int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; mpi_request.Header.PageNumber = 2; mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_bios_pg3 - obtain bios page 3 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage3_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; mpi_request.Header.PageNumber = 3; mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_iounit_pg0 - obtain iounit page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_iounit_pg1 - obtain iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_set_iounit_pg1 - set iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; mpi_request.Header.PageNumber = 8; mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_sas_device_pg0 - obtain sas device page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: device handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; mpi_request.Header.PageNumber = 0; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_sas_device_pg1 - obtain sas device page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: device handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; mpi_request.Header.PageNumber = 1; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_number_hba_phys - obtain number of phys on the host * @ioc: per adapter object * @num_phys: pointer returned with the number of phys * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) { Mpi2ConfigRequest_t mpi_request; int r; u16 ioc_status; Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage0_t config_page; *num_phys = 0; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, &mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, &mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, sizeof(Mpi2SasIOUnitPage0_t)); if (!r) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) *num_phys = config_page.NumPhys; } out: return r; } /** * mpt2sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @sz: size of buffer passed in config_page * Context: sleep. * * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @sz: size of buffer passed in config_page * Context: sleep. * * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @sz: size of buffer passed in config_page * Context: sleep. * * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_get_expander_pg0 - obtain expander page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: expander handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_expander_pg1 - obtain expander page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @phy_number: phy number * @handle: expander handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
 */
int
mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page,
    u32 phy_number, u16 handle)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/*
	 * Step 2: read the page addressed by handle + phy number using the
	 * HNDL_PHY_NUM form of the page address.
	 */
	mpi_request.PageAddress =
	    cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
	    (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_enclosure_pg0 - obtain enclosure page 0
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @form: GET_NEXT_HANDLE or HANDLE
 * @handle: expander handle
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
*/ int mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_phy_pg0 - obtain phy page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @phy_number: phy number * Context: sleep. * * Returns 0 for success, non-zero for failure. 
 */
int
mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2SasPhyPage0_t *config_page,
    u32 phy_number)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Step 2: read the current page for the addressed phy. */
	mpi_request.PageAddress =
	    cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_phy_pg1 - obtain phy page 1
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @phy_number: phy number
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2SasPhyPage1_t *config_page,
    u32 phy_number)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Step 2: read the current page for the addressed phy. */
	mpi_request.PageAddress =
	    cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_raid_volume_pg1 - obtain raid volume page 1
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @form: GET_NEXT_HANDLE or HANDLE
 * @handle: volume handle
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page,
    u32 form, u32 handle)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Step 2: read the current page for the volume addressed by form|handle. */
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_number_pds - obtain number of phys disk assigned to volume
 * @ioc: per adapter object
 * @handle: volume handle
 * @num_pds: returns pds count
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
    u8 *num_pds)
{
	Mpi2ConfigRequest_t mpi_request;
	Mpi2RaidVolPage0_t config_page;	/* on-stack scratch page */
	Mpi2ConfigReply_t mpi_reply;
	int r;
	u16 ioc_status;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	*num_pds = 0;	/* default answer on any failure path */
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Step 2: read volume page 0 and pull out the NumPhysDisks field. */
	mpi_request.PageAddress =
	    cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
	    sizeof(Mpi2RaidVolPage0_t));
	if (!r) {
		/* only trust the page contents when IOCStatus says SUCCESS */
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
			*num_pds = config_page.NumPhysDisks;
	}

 out:
	return r;
}

/**
 * mpt2sas_config_get_raid_volume_pg0 - obtain raid volume page 0
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @form: GET_NEXT_HANDLE or HANDLE
 * @handle: volume handle
 * @sz: size of buffer passed in config_page
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page,
    u32 form, u32 handle, u16 sz)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/*
	 * Step 2: read the current page into the caller's buffer; sz is
	 * caller-supplied because page 0 is variable length.
	 */
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
 out:
	return r;
}

/**
 * mpt2sas_config_get_phys_disk_pg0 - obtain phys disk page 0
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
 * @form_specific: specific to the form
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
    u32 form, u32 form_specific)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* Step 1: PAGE_HEADER action so the firmware fills in the header. */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Step 2: read the page addressed by form | form-specific value. */
	mpi_request.PageAddress = cpu_to_le32(form | form_specific);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_volume_handle - returns volume handle for give hidden raid components
 * @ioc: per adapter object
 * @pd_handle: phys disk handle
 * @volume_handle: volume handle
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
*/ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, u16 *volume_handle) { Mpi2RaidConfigurationPage0_t *config_page = NULL; Mpi2ConfigRequest_t mpi_request; Mpi2ConfigReply_t mpi_reply; int r, i, config_page_sz; u16 ioc_status; *volume_handle = 0; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG; mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION; mpi_request.Header.PageNumber = 0; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, &mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); config_page = kmalloc(config_page_sz, GFP_KERNEL); if (!config_page) goto out; r = _config_request(ioc, &mpi_request, &mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, config_page_sz); if (r) goto out; r = -1; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) goto out; for (i = 0; i < config_page->NumElements; i++) { if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) & MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) != MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT) continue; if (le16_to_cpu(config_page->ConfigElement[i]. PhysDiskDevHandle) == pd_handle) { *volume_handle = le16_to_cpu(config_page-> ConfigElement[i].VolDevHandle); r = 0; goto out; } } out: kfree(config_page); return r; } /** * mpt2sas_config_get_volume_wwid - returns wwid given the volume handle * @ioc: per adapter object * @volume_handle: volume handle * @wwid: volume wwid * Context: sleep. 
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
    u64 *wwid)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2RaidVolPage1_t raid_vol_pg1;

	*wwid = 0;	/* default answer when the page read fails */
	/* WWID lives in RAID volume page 1; fetch it by volume handle. */
	if (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
	    volume_handle))) {
		*wwid = le64_to_cpu(raid_vol_pg1.WWID);
		return 0;
	} else
		return -1;
}
gpl-2.0
simo97/linux
arch/m68k/coldfire/m527x.c
1017
3371
/***************************************************************************/ /* * m527x.c -- platform support for ColdFire 527x based boards * * Sub-architcture dependent initialization code for the Freescale * 5270/5271 and 5274/5275 CPUs. * * Copyright (C) 1999-2004, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2001-2004, SnapGear Inc. (www.snapgear.com) */ /***************************************************************************/ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/io.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfuart.h> #include <asm/mcfclk.h> /***************************************************************************/ DEFINE_CLK(pll, "pll.0", MCF_CLK); DEFINE_CLK(sys, "sys.0", MCF_BUSCLK); DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK); DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK); DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK); DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK); DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK); DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK); DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK); DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK); DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK); DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK); struct clk *mcf_clks[] = { &clk_pll, &clk_sys, &clk_mcfpit0, &clk_mcfpit1, &clk_mcfpit2, &clk_mcfpit3, &clk_mcfuart0, &clk_mcfuart1, &clk_mcfuart2, &clk_mcfqspi0, &clk_fec0, &clk_fec1, NULL }; /***************************************************************************/ static void __init m527x_qspi_init(void) { #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) #if defined(CONFIG_M5271) u16 par; /* setup QSPS pins for QSPI with gpio CS control */ writeb(0x1f, MCFGPIO_PAR_QSPI); /* and CS2 & CS3 as gpio */ par = readw(MCFGPIO_PAR_TIMER); par &= 0x3f3f; writew(par, MCFGPIO_PAR_TIMER); #elif defined(CONFIG_M5275) /* setup QSPS pins for QSPI with gpio CS control */ writew(0x003e, MCFGPIO_PAR_QSPI); #endif #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 
*/ } /***************************************************************************/ static void __init m527x_uarts_init(void) { u16 sepmask; /* * External Pin Mask Setting & Enable External Pin for Interface */ sepmask = readw(MCFGPIO_PAR_UART); sepmask |= UART0_ENABLE_MASK | UART1_ENABLE_MASK | UART2_ENABLE_MASK; writew(sepmask, MCFGPIO_PAR_UART); } /***************************************************************************/ static void __init m527x_fec_init(void) { u8 v; /* Set multi-function pins to ethernet mode for fec0 */ #if defined(CONFIG_M5271) v = readb(MCFGPIO_PAR_FECI2C); writeb(v | 0xf0, MCFGPIO_PAR_FECI2C); #else u16 par; par = readw(MCFGPIO_PAR_FECI2C); writew(par | 0xf00, MCFGPIO_PAR_FECI2C); v = readb(MCFGPIO_PAR_FEC0HL); writeb(v | 0xc0, MCFGPIO_PAR_FEC0HL); /* Set multi-function pins to ethernet mode for fec1 */ par = readw(MCFGPIO_PAR_FECI2C); writew(par | 0xa0, MCFGPIO_PAR_FECI2C); v = readb(MCFGPIO_PAR_FEC1HL); writeb(v | 0xc0, MCFGPIO_PAR_FEC1HL); #endif } /***************************************************************************/ void __init config_BSP(char *commandp, int size) { mach_sched_init = hw_timer_init; m527x_uarts_init(); m527x_fec_init(); m527x_qspi_init(); } /***************************************************************************/
gpl-2.0
Emotroid-Team/emotion_kernel_tw_edge
sound/soc/codecs/wm2200.c
2553
82325
/* * wm2200.c -- WM2200 ALSA SoC Audio driver * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/firmware.h> #include <linux/gcd.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/regulator/fixed.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/jack.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/wm2200.h> #include "wm2200.h" #include "wmfw.h" #include "wm_adsp.h" #define WM2200_DSP_CONTROL_1 0x00 #define WM2200_DSP_CONTROL_2 0x02 #define WM2200_DSP_CONTROL_3 0x03 #define WM2200_DSP_CONTROL_4 0x04 #define WM2200_DSP_CONTROL_5 0x06 #define WM2200_DSP_CONTROL_6 0x07 #define WM2200_DSP_CONTROL_7 0x08 #define WM2200_DSP_CONTROL_8 0x09 #define WM2200_DSP_CONTROL_9 0x0A #define WM2200_DSP_CONTROL_10 0x0B #define WM2200_DSP_CONTROL_11 0x0C #define WM2200_DSP_CONTROL_12 0x0D #define WM2200_DSP_CONTROL_13 0x0F #define WM2200_DSP_CONTROL_14 0x10 #define WM2200_DSP_CONTROL_15 0x11 #define WM2200_DSP_CONTROL_16 0x12 #define WM2200_DSP_CONTROL_17 0x13 #define WM2200_DSP_CONTROL_18 0x14 #define WM2200_DSP_CONTROL_19 0x16 #define WM2200_DSP_CONTROL_20 0x17 #define WM2200_DSP_CONTROL_21 0x18 #define WM2200_DSP_CONTROL_22 0x1A #define WM2200_DSP_CONTROL_23 0x1B #define WM2200_DSP_CONTROL_24 0x1C #define WM2200_DSP_CONTROL_25 0x1E #define WM2200_DSP_CONTROL_26 0x20 #define WM2200_DSP_CONTROL_27 0x21 #define WM2200_DSP_CONTROL_28 0x22 #define WM2200_DSP_CONTROL_29 0x23 #define WM2200_DSP_CONTROL_30 0x24 
#define WM2200_DSP_CONTROL_31 0x26 /* The code assumes DCVDD is generated internally */ #define WM2200_NUM_CORE_SUPPLIES 2 static const char *wm2200_core_supply_names[WM2200_NUM_CORE_SUPPLIES] = { "DBVDD", "LDOVDD", }; struct wm2200_fll { int fref; int fout; int src; struct completion lock; }; /* codec private data */ struct wm2200_priv { struct wm_adsp dsp[2]; struct regmap *regmap; struct device *dev; struct snd_soc_codec *codec; struct wm2200_pdata pdata; struct regulator_bulk_data core_supplies[WM2200_NUM_CORE_SUPPLIES]; struct completion fll_lock; int fll_fout; int fll_fref; int fll_src; int rev; int sysclk; }; #define WM2200_DSP_RANGE_BASE (WM2200_MAX_REGISTER + 1) #define WM2200_DSP_SPACING 12288 #define WM2200_DSP1_DM_BASE (WM2200_DSP_RANGE_BASE + (0 * WM2200_DSP_SPACING)) #define WM2200_DSP1_PM_BASE (WM2200_DSP_RANGE_BASE + (1 * WM2200_DSP_SPACING)) #define WM2200_DSP1_ZM_BASE (WM2200_DSP_RANGE_BASE + (2 * WM2200_DSP_SPACING)) #define WM2200_DSP2_DM_BASE (WM2200_DSP_RANGE_BASE + (3 * WM2200_DSP_SPACING)) #define WM2200_DSP2_PM_BASE (WM2200_DSP_RANGE_BASE + (4 * WM2200_DSP_SPACING)) #define WM2200_DSP2_ZM_BASE (WM2200_DSP_RANGE_BASE + (5 * WM2200_DSP_SPACING)) static const struct regmap_range_cfg wm2200_ranges[] = { { .name = "DSP1DM", .range_min = WM2200_DSP1_DM_BASE, .range_max = WM2200_DSP1_DM_BASE + 12287, .selector_reg = WM2200_DSP1_CONTROL_3, .selector_mask = WM2200_DSP1_PAGE_BASE_DM_0_MASK, .selector_shift = WM2200_DSP1_PAGE_BASE_DM_0_SHIFT, .window_start = WM2200_DSP1_DM_0, .window_len = 2048, }, { .name = "DSP1PM", .range_min = WM2200_DSP1_PM_BASE, .range_max = WM2200_DSP1_PM_BASE + 12287, .selector_reg = WM2200_DSP1_CONTROL_2, .selector_mask = WM2200_DSP1_PAGE_BASE_PM_0_MASK, .selector_shift = WM2200_DSP1_PAGE_BASE_PM_0_SHIFT, .window_start = WM2200_DSP1_PM_0, .window_len = 768, }, { .name = "DSP1ZM", .range_min = WM2200_DSP1_ZM_BASE, .range_max = WM2200_DSP1_ZM_BASE + 2047, .selector_reg = WM2200_DSP1_CONTROL_4, .selector_mask = 
WM2200_DSP1_PAGE_BASE_ZM_0_MASK, .selector_shift = WM2200_DSP1_PAGE_BASE_ZM_0_SHIFT, .window_start = WM2200_DSP1_ZM_0, .window_len = 1024, }, { .name = "DSP2DM", .range_min = WM2200_DSP2_DM_BASE, .range_max = WM2200_DSP2_DM_BASE + 4095, .selector_reg = WM2200_DSP2_CONTROL_3, .selector_mask = WM2200_DSP2_PAGE_BASE_DM_0_MASK, .selector_shift = WM2200_DSP2_PAGE_BASE_DM_0_SHIFT, .window_start = WM2200_DSP2_DM_0, .window_len = 2048, }, { .name = "DSP2PM", .range_min = WM2200_DSP2_PM_BASE, .range_max = WM2200_DSP2_PM_BASE + 11287, .selector_reg = WM2200_DSP2_CONTROL_2, .selector_mask = WM2200_DSP2_PAGE_BASE_PM_0_MASK, .selector_shift = WM2200_DSP2_PAGE_BASE_PM_0_SHIFT, .window_start = WM2200_DSP2_PM_0, .window_len = 768, }, { .name = "DSP2ZM", .range_min = WM2200_DSP2_ZM_BASE, .range_max = WM2200_DSP2_ZM_BASE + 2047, .selector_reg = WM2200_DSP2_CONTROL_4, .selector_mask = WM2200_DSP2_PAGE_BASE_ZM_0_MASK, .selector_shift = WM2200_DSP2_PAGE_BASE_ZM_0_SHIFT, .window_start = WM2200_DSP2_ZM_0, .window_len = 1024, }, }; static const struct wm_adsp_region wm2200_dsp1_regions[] = { { .type = WMFW_ADSP1_PM, .base = WM2200_DSP1_PM_BASE }, { .type = WMFW_ADSP1_DM, .base = WM2200_DSP1_DM_BASE }, { .type = WMFW_ADSP1_ZM, .base = WM2200_DSP1_ZM_BASE }, }; static const struct wm_adsp_region wm2200_dsp2_regions[] = { { .type = WMFW_ADSP1_PM, .base = WM2200_DSP2_PM_BASE }, { .type = WMFW_ADSP1_DM, .base = WM2200_DSP2_DM_BASE }, { .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE }, }; static struct reg_default wm2200_reg_defaults[] = { { 0x000B, 0x0000 }, /* R11 - Tone Generator 1 */ { 0x0102, 0x0000 }, /* R258 - Clocking 3 */ { 0x0103, 0x0011 }, /* R259 - Clocking 4 */ { 0x0111, 0x0000 }, /* R273 - FLL Control 1 */ { 0x0112, 0x0000 }, /* R274 - FLL Control 2 */ { 0x0113, 0x0000 }, /* R275 - FLL Control 3 */ { 0x0114, 0x0000 }, /* R276 - FLL Control 4 */ { 0x0116, 0x0177 }, /* R278 - FLL Control 6 */ { 0x0117, 0x0004 }, /* R279 - FLL Control 7 */ { 0x0119, 0x0000 }, /* R281 - FLL EFS 1 
*/ { 0x011A, 0x0002 }, /* R282 - FLL EFS 2 */ { 0x0200, 0x0000 }, /* R512 - Mic Charge Pump 1 */ { 0x0201, 0x03FF }, /* R513 - Mic Charge Pump 2 */ { 0x0202, 0x9BDE }, /* R514 - DM Charge Pump 1 */ { 0x020C, 0x0000 }, /* R524 - Mic Bias Ctrl 1 */ { 0x020D, 0x0000 }, /* R525 - Mic Bias Ctrl 2 */ { 0x020F, 0x0000 }, /* R527 - Ear Piece Ctrl 1 */ { 0x0210, 0x0000 }, /* R528 - Ear Piece Ctrl 2 */ { 0x0301, 0x0000 }, /* R769 - Input Enables */ { 0x0302, 0x2240 }, /* R770 - IN1L Control */ { 0x0303, 0x0040 }, /* R771 - IN1R Control */ { 0x0304, 0x2240 }, /* R772 - IN2L Control */ { 0x0305, 0x0040 }, /* R773 - IN2R Control */ { 0x0306, 0x2240 }, /* R774 - IN3L Control */ { 0x0307, 0x0040 }, /* R775 - IN3R Control */ { 0x030A, 0x0000 }, /* R778 - RXANC_SRC */ { 0x030B, 0x0022 }, /* R779 - Input Volume Ramp */ { 0x030C, 0x0180 }, /* R780 - ADC Digital Volume 1L */ { 0x030D, 0x0180 }, /* R781 - ADC Digital Volume 1R */ { 0x030E, 0x0180 }, /* R782 - ADC Digital Volume 2L */ { 0x030F, 0x0180 }, /* R783 - ADC Digital Volume 2R */ { 0x0310, 0x0180 }, /* R784 - ADC Digital Volume 3L */ { 0x0311, 0x0180 }, /* R785 - ADC Digital Volume 3R */ { 0x0400, 0x0000 }, /* R1024 - Output Enables */ { 0x0401, 0x0000 }, /* R1025 - DAC Volume Limit 1L */ { 0x0402, 0x0000 }, /* R1026 - DAC Volume Limit 1R */ { 0x0403, 0x0000 }, /* R1027 - DAC Volume Limit 2L */ { 0x0404, 0x0000 }, /* R1028 - DAC Volume Limit 2R */ { 0x0409, 0x0000 }, /* R1033 - DAC AEC Control 1 */ { 0x040A, 0x0022 }, /* R1034 - Output Volume Ramp */ { 0x040B, 0x0180 }, /* R1035 - DAC Digital Volume 1L */ { 0x040C, 0x0180 }, /* R1036 - DAC Digital Volume 1R */ { 0x040D, 0x0180 }, /* R1037 - DAC Digital Volume 2L */ { 0x040E, 0x0180 }, /* R1038 - DAC Digital Volume 2R */ { 0x0417, 0x0069 }, /* R1047 - PDM 1 */ { 0x0418, 0x0000 }, /* R1048 - PDM 2 */ { 0x0500, 0x0000 }, /* R1280 - Audio IF 1_1 */ { 0x0501, 0x0008 }, /* R1281 - Audio IF 1_2 */ { 0x0502, 0x0000 }, /* R1282 - Audio IF 1_3 */ { 0x0503, 0x0000 }, /* R1283 - Audio IF 
1_4 */ { 0x0504, 0x0000 }, /* R1284 - Audio IF 1_5 */ { 0x0505, 0x0001 }, /* R1285 - Audio IF 1_6 */ { 0x0506, 0x0001 }, /* R1286 - Audio IF 1_7 */ { 0x0507, 0x0000 }, /* R1287 - Audio IF 1_8 */ { 0x0508, 0x0000 }, /* R1288 - Audio IF 1_9 */ { 0x0509, 0x0000 }, /* R1289 - Audio IF 1_10 */ { 0x050A, 0x0000 }, /* R1290 - Audio IF 1_11 */ { 0x050B, 0x0000 }, /* R1291 - Audio IF 1_12 */ { 0x050C, 0x0000 }, /* R1292 - Audio IF 1_13 */ { 0x050D, 0x0000 }, /* R1293 - Audio IF 1_14 */ { 0x050E, 0x0000 }, /* R1294 - Audio IF 1_15 */ { 0x050F, 0x0000 }, /* R1295 - Audio IF 1_16 */ { 0x0510, 0x0000 }, /* R1296 - Audio IF 1_17 */ { 0x0511, 0x0000 }, /* R1297 - Audio IF 1_18 */ { 0x0512, 0x0000 }, /* R1298 - Audio IF 1_19 */ { 0x0513, 0x0000 }, /* R1299 - Audio IF 1_20 */ { 0x0514, 0x0000 }, /* R1300 - Audio IF 1_21 */ { 0x0515, 0x0001 }, /* R1301 - Audio IF 1_22 */ { 0x0600, 0x0000 }, /* R1536 - OUT1LMIX Input 1 Source */ { 0x0601, 0x0080 }, /* R1537 - OUT1LMIX Input 1 Volume */ { 0x0602, 0x0000 }, /* R1538 - OUT1LMIX Input 2 Source */ { 0x0603, 0x0080 }, /* R1539 - OUT1LMIX Input 2 Volume */ { 0x0604, 0x0000 }, /* R1540 - OUT1LMIX Input 3 Source */ { 0x0605, 0x0080 }, /* R1541 - OUT1LMIX Input 3 Volume */ { 0x0606, 0x0000 }, /* R1542 - OUT1LMIX Input 4 Source */ { 0x0607, 0x0080 }, /* R1543 - OUT1LMIX Input 4 Volume */ { 0x0608, 0x0000 }, /* R1544 - OUT1RMIX Input 1 Source */ { 0x0609, 0x0080 }, /* R1545 - OUT1RMIX Input 1 Volume */ { 0x060A, 0x0000 }, /* R1546 - OUT1RMIX Input 2 Source */ { 0x060B, 0x0080 }, /* R1547 - OUT1RMIX Input 2 Volume */ { 0x060C, 0x0000 }, /* R1548 - OUT1RMIX Input 3 Source */ { 0x060D, 0x0080 }, /* R1549 - OUT1RMIX Input 3 Volume */ { 0x060E, 0x0000 }, /* R1550 - OUT1RMIX Input 4 Source */ { 0x060F, 0x0080 }, /* R1551 - OUT1RMIX Input 4 Volume */ { 0x0610, 0x0000 }, /* R1552 - OUT2LMIX Input 1 Source */ { 0x0611, 0x0080 }, /* R1553 - OUT2LMIX Input 1 Volume */ { 0x0612, 0x0000 }, /* R1554 - OUT2LMIX Input 2 Source */ { 0x0613, 0x0080 }, /* R1555 - 
OUT2LMIX Input 2 Volume */ { 0x0614, 0x0000 }, /* R1556 - OUT2LMIX Input 3 Source */ { 0x0615, 0x0080 }, /* R1557 - OUT2LMIX Input 3 Volume */ { 0x0616, 0x0000 }, /* R1558 - OUT2LMIX Input 4 Source */ { 0x0617, 0x0080 }, /* R1559 - OUT2LMIX Input 4 Volume */ { 0x0618, 0x0000 }, /* R1560 - OUT2RMIX Input 1 Source */ { 0x0619, 0x0080 }, /* R1561 - OUT2RMIX Input 1 Volume */ { 0x061A, 0x0000 }, /* R1562 - OUT2RMIX Input 2 Source */ { 0x061B, 0x0080 }, /* R1563 - OUT2RMIX Input 2 Volume */ { 0x061C, 0x0000 }, /* R1564 - OUT2RMIX Input 3 Source */ { 0x061D, 0x0080 }, /* R1565 - OUT2RMIX Input 3 Volume */ { 0x061E, 0x0000 }, /* R1566 - OUT2RMIX Input 4 Source */ { 0x061F, 0x0080 }, /* R1567 - OUT2RMIX Input 4 Volume */ { 0x0620, 0x0000 }, /* R1568 - AIF1TX1MIX Input 1 Source */ { 0x0621, 0x0080 }, /* R1569 - AIF1TX1MIX Input 1 Volume */ { 0x0622, 0x0000 }, /* R1570 - AIF1TX1MIX Input 2 Source */ { 0x0623, 0x0080 }, /* R1571 - AIF1TX1MIX Input 2 Volume */ { 0x0624, 0x0000 }, /* R1572 - AIF1TX1MIX Input 3 Source */ { 0x0625, 0x0080 }, /* R1573 - AIF1TX1MIX Input 3 Volume */ { 0x0626, 0x0000 }, /* R1574 - AIF1TX1MIX Input 4 Source */ { 0x0627, 0x0080 }, /* R1575 - AIF1TX1MIX Input 4 Volume */ { 0x0628, 0x0000 }, /* R1576 - AIF1TX2MIX Input 1 Source */ { 0x0629, 0x0080 }, /* R1577 - AIF1TX2MIX Input 1 Volume */ { 0x062A, 0x0000 }, /* R1578 - AIF1TX2MIX Input 2 Source */ { 0x062B, 0x0080 }, /* R1579 - AIF1TX2MIX Input 2 Volume */ { 0x062C, 0x0000 }, /* R1580 - AIF1TX2MIX Input 3 Source */ { 0x062D, 0x0080 }, /* R1581 - AIF1TX2MIX Input 3 Volume */ { 0x062E, 0x0000 }, /* R1582 - AIF1TX2MIX Input 4 Source */ { 0x062F, 0x0080 }, /* R1583 - AIF1TX2MIX Input 4 Volume */ { 0x0630, 0x0000 }, /* R1584 - AIF1TX3MIX Input 1 Source */ { 0x0631, 0x0080 }, /* R1585 - AIF1TX3MIX Input 1 Volume */ { 0x0632, 0x0000 }, /* R1586 - AIF1TX3MIX Input 2 Source */ { 0x0633, 0x0080 }, /* R1587 - AIF1TX3MIX Input 2 Volume */ { 0x0634, 0x0000 }, /* R1588 - AIF1TX3MIX Input 3 Source */ { 0x0635, 0x0080 
}, /* R1589 - AIF1TX3MIX Input 3 Volume */ { 0x0636, 0x0000 }, /* R1590 - AIF1TX3MIX Input 4 Source */ { 0x0637, 0x0080 }, /* R1591 - AIF1TX3MIX Input 4 Volume */ { 0x0638, 0x0000 }, /* R1592 - AIF1TX4MIX Input 1 Source */ { 0x0639, 0x0080 }, /* R1593 - AIF1TX4MIX Input 1 Volume */ { 0x063A, 0x0000 }, /* R1594 - AIF1TX4MIX Input 2 Source */ { 0x063B, 0x0080 }, /* R1595 - AIF1TX4MIX Input 2 Volume */ { 0x063C, 0x0000 }, /* R1596 - AIF1TX4MIX Input 3 Source */ { 0x063D, 0x0080 }, /* R1597 - AIF1TX4MIX Input 3 Volume */ { 0x063E, 0x0000 }, /* R1598 - AIF1TX4MIX Input 4 Source */ { 0x063F, 0x0080 }, /* R1599 - AIF1TX4MIX Input 4 Volume */ { 0x0640, 0x0000 }, /* R1600 - AIF1TX5MIX Input 1 Source */ { 0x0641, 0x0080 }, /* R1601 - AIF1TX5MIX Input 1 Volume */ { 0x0642, 0x0000 }, /* R1602 - AIF1TX5MIX Input 2 Source */ { 0x0643, 0x0080 }, /* R1603 - AIF1TX5MIX Input 2 Volume */ { 0x0644, 0x0000 }, /* R1604 - AIF1TX5MIX Input 3 Source */ { 0x0645, 0x0080 }, /* R1605 - AIF1TX5MIX Input 3 Volume */ { 0x0646, 0x0000 }, /* R1606 - AIF1TX5MIX Input 4 Source */ { 0x0647, 0x0080 }, /* R1607 - AIF1TX5MIX Input 4 Volume */ { 0x0648, 0x0000 }, /* R1608 - AIF1TX6MIX Input 1 Source */ { 0x0649, 0x0080 }, /* R1609 - AIF1TX6MIX Input 1 Volume */ { 0x064A, 0x0000 }, /* R1610 - AIF1TX6MIX Input 2 Source */ { 0x064B, 0x0080 }, /* R1611 - AIF1TX6MIX Input 2 Volume */ { 0x064C, 0x0000 }, /* R1612 - AIF1TX6MIX Input 3 Source */ { 0x064D, 0x0080 }, /* R1613 - AIF1TX6MIX Input 3 Volume */ { 0x064E, 0x0000 }, /* R1614 - AIF1TX6MIX Input 4 Source */ { 0x064F, 0x0080 }, /* R1615 - AIF1TX6MIX Input 4 Volume */ { 0x0650, 0x0000 }, /* R1616 - EQLMIX Input 1 Source */ { 0x0651, 0x0080 }, /* R1617 - EQLMIX Input 1 Volume */ { 0x0652, 0x0000 }, /* R1618 - EQLMIX Input 2 Source */ { 0x0653, 0x0080 }, /* R1619 - EQLMIX Input 2 Volume */ { 0x0654, 0x0000 }, /* R1620 - EQLMIX Input 3 Source */ { 0x0655, 0x0080 }, /* R1621 - EQLMIX Input 3 Volume */ { 0x0656, 0x0000 }, /* R1622 - EQLMIX Input 4 Source */ { 
0x0657, 0x0080 }, /* R1623 - EQLMIX Input 4 Volume */ { 0x0658, 0x0000 }, /* R1624 - EQRMIX Input 1 Source */ { 0x0659, 0x0080 }, /* R1625 - EQRMIX Input 1 Volume */ { 0x065A, 0x0000 }, /* R1626 - EQRMIX Input 2 Source */ { 0x065B, 0x0080 }, /* R1627 - EQRMIX Input 2 Volume */ { 0x065C, 0x0000 }, /* R1628 - EQRMIX Input 3 Source */ { 0x065D, 0x0080 }, /* R1629 - EQRMIX Input 3 Volume */ { 0x065E, 0x0000 }, /* R1630 - EQRMIX Input 4 Source */ { 0x065F, 0x0080 }, /* R1631 - EQRMIX Input 4 Volume */ { 0x0660, 0x0000 }, /* R1632 - LHPF1MIX Input 1 Source */ { 0x0661, 0x0080 }, /* R1633 - LHPF1MIX Input 1 Volume */ { 0x0662, 0x0000 }, /* R1634 - LHPF1MIX Input 2 Source */ { 0x0663, 0x0080 }, /* R1635 - LHPF1MIX Input 2 Volume */ { 0x0664, 0x0000 }, /* R1636 - LHPF1MIX Input 3 Source */ { 0x0665, 0x0080 }, /* R1637 - LHPF1MIX Input 3 Volume */ { 0x0666, 0x0000 }, /* R1638 - LHPF1MIX Input 4 Source */ { 0x0667, 0x0080 }, /* R1639 - LHPF1MIX Input 4 Volume */ { 0x0668, 0x0000 }, /* R1640 - LHPF2MIX Input 1 Source */ { 0x0669, 0x0080 }, /* R1641 - LHPF2MIX Input 1 Volume */ { 0x066A, 0x0000 }, /* R1642 - LHPF2MIX Input 2 Source */ { 0x066B, 0x0080 }, /* R1643 - LHPF2MIX Input 2 Volume */ { 0x066C, 0x0000 }, /* R1644 - LHPF2MIX Input 3 Source */ { 0x066D, 0x0080 }, /* R1645 - LHPF2MIX Input 3 Volume */ { 0x066E, 0x0000 }, /* R1646 - LHPF2MIX Input 4 Source */ { 0x066F, 0x0080 }, /* R1647 - LHPF2MIX Input 4 Volume */ { 0x0670, 0x0000 }, /* R1648 - DSP1LMIX Input 1 Source */ { 0x0671, 0x0080 }, /* R1649 - DSP1LMIX Input 1 Volume */ { 0x0672, 0x0000 }, /* R1650 - DSP1LMIX Input 2 Source */ { 0x0673, 0x0080 }, /* R1651 - DSP1LMIX Input 2 Volume */ { 0x0674, 0x0000 }, /* R1652 - DSP1LMIX Input 3 Source */ { 0x0675, 0x0080 }, /* R1653 - DSP1LMIX Input 3 Volume */ { 0x0676, 0x0000 }, /* R1654 - DSP1LMIX Input 4 Source */ { 0x0677, 0x0080 }, /* R1655 - DSP1LMIX Input 4 Volume */ { 0x0678, 0x0000 }, /* R1656 - DSP1RMIX Input 1 Source */ { 0x0679, 0x0080 }, /* R1657 - DSP1RMIX Input 1 
Volume */ { 0x067A, 0x0000 }, /* R1658 - DSP1RMIX Input 2 Source */ { 0x067B, 0x0080 }, /* R1659 - DSP1RMIX Input 2 Volume */ { 0x067C, 0x0000 }, /* R1660 - DSP1RMIX Input 3 Source */ { 0x067D, 0x0080 }, /* R1661 - DSP1RMIX Input 3 Volume */ { 0x067E, 0x0000 }, /* R1662 - DSP1RMIX Input 4 Source */ { 0x067F, 0x0080 }, /* R1663 - DSP1RMIX Input 4 Volume */ { 0x0680, 0x0000 }, /* R1664 - DSP1AUX1MIX Input 1 Source */ { 0x0681, 0x0000 }, /* R1665 - DSP1AUX2MIX Input 1 Source */ { 0x0682, 0x0000 }, /* R1666 - DSP1AUX3MIX Input 1 Source */ { 0x0683, 0x0000 }, /* R1667 - DSP1AUX4MIX Input 1 Source */ { 0x0684, 0x0000 }, /* R1668 - DSP1AUX5MIX Input 1 Source */ { 0x0685, 0x0000 }, /* R1669 - DSP1AUX6MIX Input 1 Source */ { 0x0686, 0x0000 }, /* R1670 - DSP2LMIX Input 1 Source */ { 0x0687, 0x0080 }, /* R1671 - DSP2LMIX Input 1 Volume */ { 0x0688, 0x0000 }, /* R1672 - DSP2LMIX Input 2 Source */ { 0x0689, 0x0080 }, /* R1673 - DSP2LMIX Input 2 Volume */ { 0x068A, 0x0000 }, /* R1674 - DSP2LMIX Input 3 Source */ { 0x068B, 0x0080 }, /* R1675 - DSP2LMIX Input 3 Volume */ { 0x068C, 0x0000 }, /* R1676 - DSP2LMIX Input 4 Source */ { 0x068D, 0x0080 }, /* R1677 - DSP2LMIX Input 4 Volume */ { 0x068E, 0x0000 }, /* R1678 - DSP2RMIX Input 1 Source */ { 0x068F, 0x0080 }, /* R1679 - DSP2RMIX Input 1 Volume */ { 0x0690, 0x0000 }, /* R1680 - DSP2RMIX Input 2 Source */ { 0x0691, 0x0080 }, /* R1681 - DSP2RMIX Input 2 Volume */ { 0x0692, 0x0000 }, /* R1682 - DSP2RMIX Input 3 Source */ { 0x0693, 0x0080 }, /* R1683 - DSP2RMIX Input 3 Volume */ { 0x0694, 0x0000 }, /* R1684 - DSP2RMIX Input 4 Source */ { 0x0695, 0x0080 }, /* R1685 - DSP2RMIX Input 4 Volume */ { 0x0696, 0x0000 }, /* R1686 - DSP2AUX1MIX Input 1 Source */ { 0x0697, 0x0000 }, /* R1687 - DSP2AUX2MIX Input 1 Source */ { 0x0698, 0x0000 }, /* R1688 - DSP2AUX3MIX Input 1 Source */ { 0x0699, 0x0000 }, /* R1689 - DSP2AUX4MIX Input 1 Source */ { 0x069A, 0x0000 }, /* R1690 - DSP2AUX5MIX Input 1 Source */ { 0x069B, 0x0000 }, /* R1691 - DSP2AUX6MIX 
Input 1 Source */ { 0x0700, 0xA101 }, /* R1792 - GPIO CTRL 1 */ { 0x0701, 0xA101 }, /* R1793 - GPIO CTRL 2 */ { 0x0702, 0xA101 }, /* R1794 - GPIO CTRL 3 */ { 0x0703, 0xA101 }, /* R1795 - GPIO CTRL 4 */ { 0x0709, 0x0000 }, /* R1801 - Misc Pad Ctrl 1 */ { 0x0801, 0x00FF }, /* R2049 - Interrupt Status 1 Mask */ { 0x0804, 0xFFFF }, /* R2052 - Interrupt Status 2 Mask */ { 0x0808, 0x0000 }, /* R2056 - Interrupt Control */ { 0x0900, 0x0000 }, /* R2304 - EQL_1 */ { 0x0901, 0x0000 }, /* R2305 - EQL_2 */ { 0x0902, 0x0000 }, /* R2306 - EQL_3 */ { 0x0903, 0x0000 }, /* R2307 - EQL_4 */ { 0x0904, 0x0000 }, /* R2308 - EQL_5 */ { 0x0905, 0x0000 }, /* R2309 - EQL_6 */ { 0x0906, 0x0000 }, /* R2310 - EQL_7 */ { 0x0907, 0x0000 }, /* R2311 - EQL_8 */ { 0x0908, 0x0000 }, /* R2312 - EQL_9 */ { 0x0909, 0x0000 }, /* R2313 - EQL_10 */ { 0x090A, 0x0000 }, /* R2314 - EQL_11 */ { 0x090B, 0x0000 }, /* R2315 - EQL_12 */ { 0x090C, 0x0000 }, /* R2316 - EQL_13 */ { 0x090D, 0x0000 }, /* R2317 - EQL_14 */ { 0x090E, 0x0000 }, /* R2318 - EQL_15 */ { 0x090F, 0x0000 }, /* R2319 - EQL_16 */ { 0x0910, 0x0000 }, /* R2320 - EQL_17 */ { 0x0911, 0x0000 }, /* R2321 - EQL_18 */ { 0x0912, 0x0000 }, /* R2322 - EQL_19 */ { 0x0913, 0x0000 }, /* R2323 - EQL_20 */ { 0x0916, 0x0000 }, /* R2326 - EQR_1 */ { 0x0917, 0x0000 }, /* R2327 - EQR_2 */ { 0x0918, 0x0000 }, /* R2328 - EQR_3 */ { 0x0919, 0x0000 }, /* R2329 - EQR_4 */ { 0x091A, 0x0000 }, /* R2330 - EQR_5 */ { 0x091B, 0x0000 }, /* R2331 - EQR_6 */ { 0x091C, 0x0000 }, /* R2332 - EQR_7 */ { 0x091D, 0x0000 }, /* R2333 - EQR_8 */ { 0x091E, 0x0000 }, /* R2334 - EQR_9 */ { 0x091F, 0x0000 }, /* R2335 - EQR_10 */ { 0x0920, 0x0000 }, /* R2336 - EQR_11 */ { 0x0921, 0x0000 }, /* R2337 - EQR_12 */ { 0x0922, 0x0000 }, /* R2338 - EQR_13 */ { 0x0923, 0x0000 }, /* R2339 - EQR_14 */ { 0x0924, 0x0000 }, /* R2340 - EQR_15 */ { 0x0925, 0x0000 }, /* R2341 - EQR_16 */ { 0x0926, 0x0000 }, /* R2342 - EQR_17 */ { 0x0927, 0x0000 }, /* R2343 - EQR_18 */ { 0x0928, 0x0000 }, /* R2344 - EQR_19 
*/ { 0x0929, 0x0000 }, /* R2345 - EQR_20 */ { 0x093E, 0x0000 }, /* R2366 - HPLPF1_1 */ { 0x093F, 0x0000 }, /* R2367 - HPLPF1_2 */ { 0x0942, 0x0000 }, /* R2370 - HPLPF2_1 */ { 0x0943, 0x0000 }, /* R2371 - HPLPF2_2 */ { 0x0A00, 0x0000 }, /* R2560 - DSP1 Control 1 */ { 0x0A02, 0x0000 }, /* R2562 - DSP1 Control 2 */ { 0x0A03, 0x0000 }, /* R2563 - DSP1 Control 3 */ { 0x0A04, 0x0000 }, /* R2564 - DSP1 Control 4 */ { 0x0A06, 0x0000 }, /* R2566 - DSP1 Control 5 */ { 0x0A07, 0x0000 }, /* R2567 - DSP1 Control 6 */ { 0x0A08, 0x0000 }, /* R2568 - DSP1 Control 7 */ { 0x0A09, 0x0000 }, /* R2569 - DSP1 Control 8 */ { 0x0A0A, 0x0000 }, /* R2570 - DSP1 Control 9 */ { 0x0A0B, 0x0000 }, /* R2571 - DSP1 Control 10 */ { 0x0A0C, 0x0000 }, /* R2572 - DSP1 Control 11 */ { 0x0A0D, 0x0000 }, /* R2573 - DSP1 Control 12 */ { 0x0A0F, 0x0000 }, /* R2575 - DSP1 Control 13 */ { 0x0A10, 0x0000 }, /* R2576 - DSP1 Control 14 */ { 0x0A11, 0x0000 }, /* R2577 - DSP1 Control 15 */ { 0x0A12, 0x0000 }, /* R2578 - DSP1 Control 16 */ { 0x0A13, 0x0000 }, /* R2579 - DSP1 Control 17 */ { 0x0A14, 0x0000 }, /* R2580 - DSP1 Control 18 */ { 0x0A16, 0x0000 }, /* R2582 - DSP1 Control 19 */ { 0x0A17, 0x0000 }, /* R2583 - DSP1 Control 20 */ { 0x0A18, 0x0000 }, /* R2584 - DSP1 Control 21 */ { 0x0A1A, 0x1800 }, /* R2586 - DSP1 Control 22 */ { 0x0A1B, 0x1000 }, /* R2587 - DSP1 Control 23 */ { 0x0A1C, 0x0400 }, /* R2588 - DSP1 Control 24 */ { 0x0A1E, 0x0000 }, /* R2590 - DSP1 Control 25 */ { 0x0A20, 0x0000 }, /* R2592 - DSP1 Control 26 */ { 0x0A21, 0x0000 }, /* R2593 - DSP1 Control 27 */ { 0x0A22, 0x0000 }, /* R2594 - DSP1 Control 28 */ { 0x0A23, 0x0000 }, /* R2595 - DSP1 Control 29 */ { 0x0A24, 0x0000 }, /* R2596 - DSP1 Control 30 */ { 0x0A26, 0x0000 }, /* R2598 - DSP1 Control 31 */ { 0x0B00, 0x0000 }, /* R2816 - DSP2 Control 1 */ { 0x0B02, 0x0000 }, /* R2818 - DSP2 Control 2 */ { 0x0B03, 0x0000 }, /* R2819 - DSP2 Control 3 */ { 0x0B04, 0x0000 }, /* R2820 - DSP2 Control 4 */ { 0x0B06, 0x0000 }, /* R2822 - DSP2 Control 5 
*/
	{ 0x0B07, 0x0000 }, /* R2823 - DSP2 Control 6 */
	{ 0x0B08, 0x0000 }, /* R2824 - DSP2 Control 7 */
	{ 0x0B09, 0x0000 }, /* R2825 - DSP2 Control 8 */
	{ 0x0B0A, 0x0000 }, /* R2826 - DSP2 Control 9 */
	{ 0x0B0B, 0x0000 }, /* R2827 - DSP2 Control 10 */
	{ 0x0B0C, 0x0000 }, /* R2828 - DSP2 Control 11 */
	{ 0x0B0D, 0x0000 }, /* R2829 - DSP2 Control 12 */
	{ 0x0B0F, 0x0000 }, /* R2831 - DSP2 Control 13 */
	{ 0x0B10, 0x0000 }, /* R2832 - DSP2 Control 14 */
	{ 0x0B11, 0x0000 }, /* R2833 - DSP2 Control 15 */
	{ 0x0B12, 0x0000 }, /* R2834 - DSP2 Control 16 */
	{ 0x0B13, 0x0000 }, /* R2835 - DSP2 Control 17 */
	{ 0x0B14, 0x0000 }, /* R2836 - DSP2 Control 18 */
	{ 0x0B16, 0x0000 }, /* R2838 - DSP2 Control 19 */
	{ 0x0B17, 0x0000 }, /* R2839 - DSP2 Control 20 */
	{ 0x0B18, 0x0000 }, /* R2840 - DSP2 Control 21 */
	{ 0x0B1A, 0x0800 }, /* R2842 - DSP2 Control 22 */
	{ 0x0B1B, 0x1000 }, /* R2843 - DSP2 Control 23 */
	{ 0x0B1C, 0x0400 }, /* R2844 - DSP2 Control 24 */
	{ 0x0B1E, 0x0000 }, /* R2846 - DSP2 Control 25 */
	{ 0x0B20, 0x0000 }, /* R2848 - DSP2 Control 26 */
	{ 0x0B21, 0x0000 }, /* R2849 - DSP2 Control 27 */
	{ 0x0B22, 0x0000 }, /* R2850 - DSP2 Control 28 */
	{ 0x0B23, 0x0000 }, /* R2851 - DSP2 Control 29 */
	{ 0x0B24, 0x0000 }, /* R2852 - DSP2 Control 30 */
	{ 0x0B26, 0x0000 }, /* R2854 - DSP2 Control 31 */
};

/*
 * regmap .volatile_reg callback: report which registers must not be
 * cached.  Any register inside one of the indirect-access windows
 * described by wm2200_ranges (or the virtual range they map) is
 * volatile, as are the chip ID/IRQ status registers listed below.
 *
 * NOTE(review): the upper bound uses window_start + window_len
 * *inclusive*; if window_len is a register count this looks like an
 * off-by-one — confirm against the regmap_range_cfg semantics before
 * changing, since the same test is used in the readable callback.
 */
static bool wm2200_volatile_register(struct device *dev, unsigned int reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wm2200_ranges); i++)
		if ((reg >= wm2200_ranges[i].window_start &&
		     reg <= wm2200_ranges[i].window_start +
		     wm2200_ranges[i].window_len) ||
		    (reg >= wm2200_ranges[i].range_min &&
		     reg <= wm2200_ranges[i].range_max))
			return true;

	switch (reg) {
	case WM2200_SOFTWARE_RESET:
	case WM2200_DEVICE_REVISION:
	case WM2200_ADPS1_IRQ0:
	case WM2200_ADPS1_IRQ1:
	case WM2200_INTERRUPT_STATUS_1:
	case WM2200_INTERRUPT_STATUS_2:
	case WM2200_INTERRUPT_RAW_STATUS_2:
		return true;
	default:
		return false;
	}
}

/*
 * regmap .readable_reg callback: whitelist of every register the
 * driver may read, plus anything inside the indirect-access ranges
 * (same range test as the volatile callback above).
 */
static bool wm2200_readable_register(struct device *dev, unsigned int reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wm2200_ranges); i++)
		if ((reg >= wm2200_ranges[i].window_start &&
		     reg <= wm2200_ranges[i].window_start +
		     wm2200_ranges[i].window_len) ||
		    (reg >= wm2200_ranges[i].range_min &&
		     reg <= wm2200_ranges[i].range_max))
			return true;

	switch (reg) {
	case WM2200_SOFTWARE_RESET:
	case WM2200_DEVICE_REVISION:
	case WM2200_TONE_GENERATOR_1:
	case WM2200_CLOCKING_3:
	case WM2200_CLOCKING_4:
	case WM2200_FLL_CONTROL_1:
	case WM2200_FLL_CONTROL_2:
	case WM2200_FLL_CONTROL_3:
	case WM2200_FLL_CONTROL_4:
	case WM2200_FLL_CONTROL_6:
	case WM2200_FLL_CONTROL_7:
	case WM2200_FLL_EFS_1:
	case WM2200_FLL_EFS_2:
	case WM2200_MIC_CHARGE_PUMP_1:
	case WM2200_MIC_CHARGE_PUMP_2:
	case WM2200_DM_CHARGE_PUMP_1:
	case WM2200_MIC_BIAS_CTRL_1:
	case WM2200_MIC_BIAS_CTRL_2:
	case WM2200_EAR_PIECE_CTRL_1:
	case WM2200_EAR_PIECE_CTRL_2:
	case WM2200_INPUT_ENABLES:
	case WM2200_IN1L_CONTROL:
	case WM2200_IN1R_CONTROL:
	case WM2200_IN2L_CONTROL:
	case WM2200_IN2R_CONTROL:
	case WM2200_IN3L_CONTROL:
	case WM2200_IN3R_CONTROL:
	case WM2200_RXANC_SRC:
	case WM2200_INPUT_VOLUME_RAMP:
	case WM2200_ADC_DIGITAL_VOLUME_1L:
	case WM2200_ADC_DIGITAL_VOLUME_1R:
	case WM2200_ADC_DIGITAL_VOLUME_2L:
	case WM2200_ADC_DIGITAL_VOLUME_2R:
	case WM2200_ADC_DIGITAL_VOLUME_3L:
	case WM2200_ADC_DIGITAL_VOLUME_3R:
	case WM2200_OUTPUT_ENABLES:
	case WM2200_DAC_VOLUME_LIMIT_1L:
	case WM2200_DAC_VOLUME_LIMIT_1R:
	case WM2200_DAC_VOLUME_LIMIT_2L:
	case WM2200_DAC_VOLUME_LIMIT_2R:
	case WM2200_DAC_AEC_CONTROL_1:
	case WM2200_OUTPUT_VOLUME_RAMP:
	case WM2200_DAC_DIGITAL_VOLUME_1L:
	case WM2200_DAC_DIGITAL_VOLUME_1R:
	case WM2200_DAC_DIGITAL_VOLUME_2L:
	case WM2200_DAC_DIGITAL_VOLUME_2R:
	case WM2200_PDM_1:
	case WM2200_PDM_2:
	case WM2200_AUDIO_IF_1_1:
	case WM2200_AUDIO_IF_1_2:
	case WM2200_AUDIO_IF_1_3:
	case WM2200_AUDIO_IF_1_4:
	case WM2200_AUDIO_IF_1_5:
	case WM2200_AUDIO_IF_1_6:
	case WM2200_AUDIO_IF_1_7:
	case WM2200_AUDIO_IF_1_8:
	case WM2200_AUDIO_IF_1_9:
	case WM2200_AUDIO_IF_1_10:
	case WM2200_AUDIO_IF_1_11:
	case WM2200_AUDIO_IF_1_12:
	case WM2200_AUDIO_IF_1_13:
	case WM2200_AUDIO_IF_1_14:
	case WM2200_AUDIO_IF_1_15:
	case WM2200_AUDIO_IF_1_16:
	case WM2200_AUDIO_IF_1_17:
	case WM2200_AUDIO_IF_1_18:
	case WM2200_AUDIO_IF_1_19:
	case WM2200_AUDIO_IF_1_20:
	case WM2200_AUDIO_IF_1_21:
	case WM2200_AUDIO_IF_1_22:
	case WM2200_OUT1LMIX_INPUT_1_SOURCE:
	case WM2200_OUT1LMIX_INPUT_1_VOLUME:
	case WM2200_OUT1LMIX_INPUT_2_SOURCE:
	case WM2200_OUT1LMIX_INPUT_2_VOLUME:
	case WM2200_OUT1LMIX_INPUT_3_SOURCE:
	case WM2200_OUT1LMIX_INPUT_3_VOLUME:
	case WM2200_OUT1LMIX_INPUT_4_SOURCE:
	case WM2200_OUT1LMIX_INPUT_4_VOLUME:
	case WM2200_OUT1RMIX_INPUT_1_SOURCE:
	case WM2200_OUT1RMIX_INPUT_1_VOLUME:
	case WM2200_OUT1RMIX_INPUT_2_SOURCE:
	case WM2200_OUT1RMIX_INPUT_2_VOLUME:
	case WM2200_OUT1RMIX_INPUT_3_SOURCE:
	case WM2200_OUT1RMIX_INPUT_3_VOLUME:
	case WM2200_OUT1RMIX_INPUT_4_SOURCE:
	case WM2200_OUT1RMIX_INPUT_4_VOLUME:
	case WM2200_OUT2LMIX_INPUT_1_SOURCE:
	case WM2200_OUT2LMIX_INPUT_1_VOLUME:
	case WM2200_OUT2LMIX_INPUT_2_SOURCE:
	case WM2200_OUT2LMIX_INPUT_2_VOLUME:
	case WM2200_OUT2LMIX_INPUT_3_SOURCE:
	case WM2200_OUT2LMIX_INPUT_3_VOLUME:
	case WM2200_OUT2LMIX_INPUT_4_SOURCE:
	case WM2200_OUT2LMIX_INPUT_4_VOLUME:
	case WM2200_OUT2RMIX_INPUT_1_SOURCE:
	case WM2200_OUT2RMIX_INPUT_1_VOLUME:
	case WM2200_OUT2RMIX_INPUT_2_SOURCE:
	case WM2200_OUT2RMIX_INPUT_2_VOLUME:
	case WM2200_OUT2RMIX_INPUT_3_SOURCE:
	case WM2200_OUT2RMIX_INPUT_3_VOLUME:
	case WM2200_OUT2RMIX_INPUT_4_SOURCE:
	case WM2200_OUT2RMIX_INPUT_4_VOLUME:
	case WM2200_AIF1TX1MIX_INPUT_1_SOURCE:
	case WM2200_AIF1TX1MIX_INPUT_1_VOLUME:
	case WM2200_AIF1TX1MIX_INPUT_2_SOURCE:
	case WM2200_AIF1TX1MIX_INPUT_2_VOLUME:
	case WM2200_AIF1TX1MIX_INPUT_3_SOURCE:
	case WM2200_AIF1TX1MIX_INPUT_3_VOLUME:
	case WM2200_AIF1TX1MIX_INPUT_4_SOURCE:
	case WM2200_AIF1TX1MIX_INPUT_4_VOLUME:
	case WM2200_AIF1TX2MIX_INPUT_1_SOURCE:
	case WM2200_AIF1TX2MIX_INPUT_1_VOLUME:
	case WM2200_AIF1TX2MIX_INPUT_2_SOURCE:
	case WM2200_AIF1TX2MIX_INPUT_2_VOLUME:
	case WM2200_AIF1TX2MIX_INPUT_3_SOURCE:
	case WM2200_AIF1TX2MIX_INPUT_3_VOLUME:
	case WM2200_AIF1TX2MIX_INPUT_4_SOURCE:
	case WM2200_AIF1TX2MIX_INPUT_4_VOLUME:
	case WM2200_AIF1TX3MIX_INPUT_1_SOURCE:
	case WM2200_AIF1TX3MIX_INPUT_1_VOLUME:
	case WM2200_AIF1TX3MIX_INPUT_2_SOURCE:
	case WM2200_AIF1TX3MIX_INPUT_2_VOLUME:
	case WM2200_AIF1TX3MIX_INPUT_3_SOURCE:
	case WM2200_AIF1TX3MIX_INPUT_3_VOLUME:
	case WM2200_AIF1TX3MIX_INPUT_4_SOURCE:
	case WM2200_AIF1TX3MIX_INPUT_4_VOLUME:
	case WM2200_AIF1TX4MIX_INPUT_1_SOURCE:
	case WM2200_AIF1TX4MIX_INPUT_1_VOLUME:
	case WM2200_AIF1TX4MIX_INPUT_2_SOURCE:
	case WM2200_AIF1TX4MIX_INPUT_2_VOLUME:
	case WM2200_AIF1TX4MIX_INPUT_3_SOURCE:
	case WM2200_AIF1TX4MIX_INPUT_3_VOLUME:
	case WM2200_AIF1TX4MIX_INPUT_4_SOURCE:
	case WM2200_AIF1TX4MIX_INPUT_4_VOLUME:
	case WM2200_AIF1TX5MIX_INPUT_1_SOURCE:
	case WM2200_AIF1TX5MIX_INPUT_1_VOLUME:
	case WM2200_AIF1TX5MIX_INPUT_2_SOURCE:
	case WM2200_AIF1TX5MIX_INPUT_2_VOLUME:
	case WM2200_AIF1TX5MIX_INPUT_3_SOURCE:
	case WM2200_AIF1TX5MIX_INPUT_3_VOLUME:
	case WM2200_AIF1TX5MIX_INPUT_4_SOURCE:
	case WM2200_AIF1TX5MIX_INPUT_4_VOLUME:
	case WM2200_AIF1TX6MIX_INPUT_1_SOURCE:
	case WM2200_AIF1TX6MIX_INPUT_1_VOLUME:
	case WM2200_AIF1TX6MIX_INPUT_2_SOURCE:
	case WM2200_AIF1TX6MIX_INPUT_2_VOLUME:
	case WM2200_AIF1TX6MIX_INPUT_3_SOURCE:
	case WM2200_AIF1TX6MIX_INPUT_3_VOLUME:
	case WM2200_AIF1TX6MIX_INPUT_4_SOURCE:
	case WM2200_AIF1TX6MIX_INPUT_4_VOLUME:
	case WM2200_EQLMIX_INPUT_1_SOURCE:
	case WM2200_EQLMIX_INPUT_1_VOLUME:
	case WM2200_EQLMIX_INPUT_2_SOURCE:
	case WM2200_EQLMIX_INPUT_2_VOLUME:
	case WM2200_EQLMIX_INPUT_3_SOURCE:
	case WM2200_EQLMIX_INPUT_3_VOLUME:
	case WM2200_EQLMIX_INPUT_4_SOURCE:
	case WM2200_EQLMIX_INPUT_4_VOLUME:
	case WM2200_EQRMIX_INPUT_1_SOURCE:
	case WM2200_EQRMIX_INPUT_1_VOLUME:
	case WM2200_EQRMIX_INPUT_2_SOURCE:
	case WM2200_EQRMIX_INPUT_2_VOLUME:
	case WM2200_EQRMIX_INPUT_3_SOURCE:
	case WM2200_EQRMIX_INPUT_3_VOLUME:
	case WM2200_EQRMIX_INPUT_4_SOURCE:
	case WM2200_EQRMIX_INPUT_4_VOLUME:
	case WM2200_LHPF1MIX_INPUT_1_SOURCE:
	case WM2200_LHPF1MIX_INPUT_1_VOLUME:
	case WM2200_LHPF1MIX_INPUT_2_SOURCE:
	case WM2200_LHPF1MIX_INPUT_2_VOLUME:
	case WM2200_LHPF1MIX_INPUT_3_SOURCE:
	case WM2200_LHPF1MIX_INPUT_3_VOLUME:
	case WM2200_LHPF1MIX_INPUT_4_SOURCE:
	case WM2200_LHPF1MIX_INPUT_4_VOLUME:
	case WM2200_LHPF2MIX_INPUT_1_SOURCE:
	case WM2200_LHPF2MIX_INPUT_1_VOLUME:
	case WM2200_LHPF2MIX_INPUT_2_SOURCE:
	case WM2200_LHPF2MIX_INPUT_2_VOLUME:
	case WM2200_LHPF2MIX_INPUT_3_SOURCE:
	case WM2200_LHPF2MIX_INPUT_3_VOLUME:
	case WM2200_LHPF2MIX_INPUT_4_SOURCE:
	case WM2200_LHPF2MIX_INPUT_4_VOLUME:
	case WM2200_DSP1LMIX_INPUT_1_SOURCE:
	case WM2200_DSP1LMIX_INPUT_1_VOLUME:
	case WM2200_DSP1LMIX_INPUT_2_SOURCE:
	case WM2200_DSP1LMIX_INPUT_2_VOLUME:
	case WM2200_DSP1LMIX_INPUT_3_SOURCE:
	case WM2200_DSP1LMIX_INPUT_3_VOLUME:
	case WM2200_DSP1LMIX_INPUT_4_SOURCE:
	case WM2200_DSP1LMIX_INPUT_4_VOLUME:
	case WM2200_DSP1RMIX_INPUT_1_SOURCE:
	case WM2200_DSP1RMIX_INPUT_1_VOLUME:
	case WM2200_DSP1RMIX_INPUT_2_SOURCE:
	case WM2200_DSP1RMIX_INPUT_2_VOLUME:
	case WM2200_DSP1RMIX_INPUT_3_SOURCE:
	case WM2200_DSP1RMIX_INPUT_3_VOLUME:
	case WM2200_DSP1RMIX_INPUT_4_SOURCE:
	case WM2200_DSP1RMIX_INPUT_4_VOLUME:
	case WM2200_DSP1AUX1MIX_INPUT_1_SOURCE:
	case WM2200_DSP1AUX2MIX_INPUT_1_SOURCE:
	case WM2200_DSP1AUX3MIX_INPUT_1_SOURCE:
	case WM2200_DSP1AUX4MIX_INPUT_1_SOURCE:
	case WM2200_DSP1AUX5MIX_INPUT_1_SOURCE:
	case WM2200_DSP1AUX6MIX_INPUT_1_SOURCE:
	case WM2200_DSP2LMIX_INPUT_1_SOURCE:
	case WM2200_DSP2LMIX_INPUT_1_VOLUME:
	case WM2200_DSP2LMIX_INPUT_2_SOURCE:
	case WM2200_DSP2LMIX_INPUT_2_VOLUME:
	case WM2200_DSP2LMIX_INPUT_3_SOURCE:
	case WM2200_DSP2LMIX_INPUT_3_VOLUME:
	case WM2200_DSP2LMIX_INPUT_4_SOURCE:
	case WM2200_DSP2LMIX_INPUT_4_VOLUME:
	case WM2200_DSP2RMIX_INPUT_1_SOURCE:
	case WM2200_DSP2RMIX_INPUT_1_VOLUME:
	case WM2200_DSP2RMIX_INPUT_2_SOURCE:
	case WM2200_DSP2RMIX_INPUT_2_VOLUME:
	case WM2200_DSP2RMIX_INPUT_3_SOURCE:
	case WM2200_DSP2RMIX_INPUT_3_VOLUME:
	case WM2200_DSP2RMIX_INPUT_4_SOURCE:
	case WM2200_DSP2RMIX_INPUT_4_VOLUME:
	case WM2200_DSP2AUX1MIX_INPUT_1_SOURCE:
	case WM2200_DSP2AUX2MIX_INPUT_1_SOURCE:
	case WM2200_DSP2AUX3MIX_INPUT_1_SOURCE:
	case WM2200_DSP2AUX4MIX_INPUT_1_SOURCE:
	case WM2200_DSP2AUX5MIX_INPUT_1_SOURCE:
	case WM2200_DSP2AUX6MIX_INPUT_1_SOURCE:
	case WM2200_GPIO_CTRL_1:
	case WM2200_GPIO_CTRL_2:
	case WM2200_GPIO_CTRL_3:
	case WM2200_GPIO_CTRL_4:
	case WM2200_ADPS1_IRQ0:
	case WM2200_ADPS1_IRQ1:
	case WM2200_MISC_PAD_CTRL_1:
	case WM2200_INTERRUPT_STATUS_1:
	case WM2200_INTERRUPT_STATUS_1_MASK:
	case WM2200_INTERRUPT_STATUS_2:
	case WM2200_INTERRUPT_RAW_STATUS_2:
	case WM2200_INTERRUPT_STATUS_2_MASK:
	case WM2200_INTERRUPT_CONTROL:
	case WM2200_EQL_1:
	case WM2200_EQL_2:
	case WM2200_EQL_3:
	case WM2200_EQL_4:
	case WM2200_EQL_5:
	case WM2200_EQL_6:
	case WM2200_EQL_7:
	case WM2200_EQL_8:
	case WM2200_EQL_9:
	case WM2200_EQL_10:
	case WM2200_EQL_11:
	case WM2200_EQL_12:
	case WM2200_EQL_13:
	case WM2200_EQL_14:
	case WM2200_EQL_15:
	case WM2200_EQL_16:
	case WM2200_EQL_17:
	case WM2200_EQL_18:
	case WM2200_EQL_19:
	case WM2200_EQL_20:
	case WM2200_EQR_1:
	case WM2200_EQR_2:
	case WM2200_EQR_3:
	case WM2200_EQR_4:
	case WM2200_EQR_5:
	case WM2200_EQR_6:
	case WM2200_EQR_7:
	case WM2200_EQR_8:
	case WM2200_EQR_9:
	case WM2200_EQR_10:
	case WM2200_EQR_11:
	case WM2200_EQR_12:
	case WM2200_EQR_13:
	case WM2200_EQR_14:
	case WM2200_EQR_15:
	case WM2200_EQR_16:
	case WM2200_EQR_17:
	case WM2200_EQR_18:
	case WM2200_EQR_19:
	case WM2200_EQR_20:
	case WM2200_HPLPF1_1:
	case WM2200_HPLPF1_2:
	case WM2200_HPLPF2_1:
	case WM2200_HPLPF2_2:
	case WM2200_DSP1_CONTROL_1:
	case WM2200_DSP1_CONTROL_2:
	case WM2200_DSP1_CONTROL_3:
	case WM2200_DSP1_CONTROL_4:
	case WM2200_DSP1_CONTROL_5:
	case WM2200_DSP1_CONTROL_6:
	case WM2200_DSP1_CONTROL_7:
	case WM2200_DSP1_CONTROL_8:
	case WM2200_DSP1_CONTROL_9:
	case WM2200_DSP1_CONTROL_10:
	case WM2200_DSP1_CONTROL_11:
	case WM2200_DSP1_CONTROL_12:
	case WM2200_DSP1_CONTROL_13:
	case WM2200_DSP1_CONTROL_14:
	case WM2200_DSP1_CONTROL_15:
	case WM2200_DSP1_CONTROL_16:
	case WM2200_DSP1_CONTROL_17:
	case WM2200_DSP1_CONTROL_18:
	case WM2200_DSP1_CONTROL_19:
	case WM2200_DSP1_CONTROL_20:
	case WM2200_DSP1_CONTROL_21:
	case WM2200_DSP1_CONTROL_22:
	case WM2200_DSP1_CONTROL_23:
	case WM2200_DSP1_CONTROL_24:
	case WM2200_DSP1_CONTROL_25:
	case WM2200_DSP1_CONTROL_26:
	case WM2200_DSP1_CONTROL_27:
	case WM2200_DSP1_CONTROL_28:
	case WM2200_DSP1_CONTROL_29:
	case WM2200_DSP1_CONTROL_30:
	case WM2200_DSP1_CONTROL_31:
	case WM2200_DSP2_CONTROL_1:
	case WM2200_DSP2_CONTROL_2:
	case WM2200_DSP2_CONTROL_3:
	case WM2200_DSP2_CONTROL_4:
	case WM2200_DSP2_CONTROL_5:
	case WM2200_DSP2_CONTROL_6:
	case WM2200_DSP2_CONTROL_7:
	case WM2200_DSP2_CONTROL_8:
	case WM2200_DSP2_CONTROL_9:
	case WM2200_DSP2_CONTROL_10:
	case WM2200_DSP2_CONTROL_11:
	case WM2200_DSP2_CONTROL_12:
	case WM2200_DSP2_CONTROL_13:
	case WM2200_DSP2_CONTROL_14:
	case WM2200_DSP2_CONTROL_15:
	case WM2200_DSP2_CONTROL_16:
	case WM2200_DSP2_CONTROL_17:
	case WM2200_DSP2_CONTROL_18:
	case WM2200_DSP2_CONTROL_19:
	case WM2200_DSP2_CONTROL_20:
	case WM2200_DSP2_CONTROL_21:
	case WM2200_DSP2_CONTROL_22:
	case WM2200_DSP2_CONTROL_23:
	case WM2200_DSP2_CONTROL_24:
	case WM2200_DSP2_CONTROL_25:
	case WM2200_DSP2_CONTROL_26:
	case WM2200_DSP2_CONTROL_27:
	case WM2200_DSP2_CONTROL_28:
	case WM2200_DSP2_CONTROL_29:
	case WM2200_DSP2_CONTROL_30:
	case WM2200_DSP2_CONTROL_31:
		return true;
	default:
		return false;
	}
}

/*
 * Register patch applied to revision A silicon after reset: tuning
 * values supplied by the vendor (register 0x07 brackets the sequence,
 * presumably a patch-enable key — TODO confirm against the datasheet).
 */
static const struct reg_default wm2200_reva_patch[] = {
	{ 0x07, 0x0003 },
	{ 0x102, 0x0200 },
	{ 0x203, 0x0084 },
	{ 0x201, 0x83FF },
	{ 0x20C, 0x0062 },
	{ 0x20D, 0x0062 },
	{ 0x207, 0x2002 },
	{ 0x208, 0x20C0 },
	{ 0x21D, 0x01C0 },
	{ 0x50A, 0x0001 },
	{ 0x50B, 0x0002 },
	{ 0x50C, 0x0003 },
	{ 0x50D, 0x0004 },
	{ 0x50E, 0x0005 },
	{ 0x510, 0x0001 },
	{ 0x511, 0x0002 },
	{ 0x512, 0x0003 },
	{ 0x513, 0x0004 },
	{ 0x514, 0x0005 },
	{ 0x515, 0x0000 },
	{ 0x201, 0x8084 },
	{ 0x202, 0xBBDE },
	{ 0x203, 0x00EC },
	{ 0x500, 0x8000 },
	{ 0x507, 0x1820 },
	{ 0x508, 0x1820 },
	{ 0x505, 0x0300 },
	{ 0x506, 0x0300 },
	{ 0x302, 0x2280 },
	{ 0x303, 0x0080 },
	{ 0x304, 0x2280 },
	{ 0x305, 0x0080 },
	{ 0x306, 0x2280 },
	{ 0x307, 0x0080 },
	{ 0x401, 0x0080 },
	{ 0x402, 0x0080 },
	{ 0x417, 0x3069 },
	{ 0x900, 0x6318 },
	{ 0x901, 0x6300 },
	{ 0x902, 0x0FC8 },
	{ 0x903, 0x03FE },
	{ 0x904, 0x00E0 },
	{ 0x905, 0x1EC4 },
	{ 0x906, 0xF136 },
	{ 0x907, 0x0409 },
	{ 0x908, 0x04CC },
	{ 0x909, 0x1C9B },
	{ 0x90A, 0xF337 },
	{ 0x90B, 0x040B },
	{ 0x90C, 0x0CBB },
	{ 0x90D, 0x16F8 },
	{ 0x90E, 0xF7D9 },
	{ 0x90F, 0x040A },
	{ 0x910, 0x1F14 },
	{ 0x911, 0x058C },
	{ 0x912, 0x0563 },
	{ 0x913, 0x4000 },
	{ 0x916, 0x6318 },
	{ 0x917, 0x6300 },
	{ 0x918, 0x0FC8 },
	{ 0x919, 0x03FE },
	{ 0x91A, 0x00E0 },
	{ 0x91B, 0x1EC4 },
	{ 0x91C, 0xF136 },
	{ 0x91D, 0x0409 },
	{ 0x91E, 0x04CC },
	{ 0x91F, 0x1C9B },
	{ 0x920, 0xF337 },
	{ 0x921, 0x040B },
	{ 0x922, 0x0CBB },
	{ 0x923, 0x16F8 },
	{ 0x924, 0xF7D9 },
	{ 0x925, 0x040A },
	{ 0x926, 0x1F14 },
	{ 0x927, 0x058C },
	{ 0x928, 0x0563 },
	{ 0x929, 0x4000 },
	{ 0x709, 0x2000 },
	{ 0x207, 0x200E },
	{ 0x208, 0x20D4 },
	{ 0x20A, 0x0080 },
	{ 0x07, 0x0000 },
};

/*
 * Reset the device: pulse the reset GPIO if the platform provides one,
 * otherwise fall back to the software reset register (writing the chip
 * ID, 0x2200, triggers the reset).  Returns 0 or a regmap error code.
 */
static int wm2200_reset(struct wm2200_priv *wm2200)
{
	if (wm2200->pdata.reset) {
		gpio_set_value_cansleep(wm2200->pdata.reset, 0);
		gpio_set_value_cansleep(wm2200->pdata.reset, 1);

		return 0;
	} else {
		return regmap_write(wm2200->regmap, WM2200_SOFTWARE_RESET,
				    0x2200);
	}
}

/* Volume TLV mappings: input PGA, digital paths, and output PGA. */
static DECLARE_TLV_DB_SCALE(in_tlv, -6300, 100, 0);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
static DECLARE_TLV_DB_SCALE(out_tlv, -6400, 100, 0);

/*
 * Shared source list for every routing mux; wm2200_mixer_values holds
 * the matching hardware source-select codes (sparse, hence the value
 * enum rather than a plain index enum).
 */
static const char *wm2200_mixer_texts[] = {
	"None",
	"Tone Generator",
	"AEC Loopback",
	"IN1L",
	"IN1R",
	"IN2L",
	"IN2R",
	"IN3L",
	"IN3R",
	"AIF1RX1",
	"AIF1RX2",
	"AIF1RX3",
	"AIF1RX4",
	"AIF1RX5",
	"AIF1RX6",
	"EQL",
	"EQR",
	"LHPF1",
	"LHPF2",
	"DSP1.1",
	"DSP1.2",
	"DSP1.3",
	"DSP1.4",
	"DSP1.5",
	"DSP1.6",
	"DSP2.1",
	"DSP2.2",
	"DSP2.3",
	"DSP2.4",
	"DSP2.5",
	"DSP2.6",
};

static int wm2200_mixer_values[] = {
	0x00,
	0x04,   /* Tone */
	0x08,   /* AEC */
	0x10,   /* Input */
	0x11,
	0x12,
	0x13,
	0x14,
	0x15,
	0x20,   /* AIF */
	0x21,
	0x22,
	0x23,
	0x24,
	0x25,
	0x50,   /* EQ */
	0x51,
	0x60,   /* LHPF1 */
	0x61,   /* LHPF2 */
	0x68,   /* DSP1 */
	0x69,
	0x6a,
	0x6b,
	0x6c,
	0x6d,
	0x70,   /* DSP2 */
	0x71,
	0x72,
	0x73,
	0x74,
	0x75,
};

/* Per-mixer volume controls: the volume register follows its source
 * register, hence base + 1/3/5/7. */
#define WM2200_MIXER_CONTROLS(name, base) \
	SOC_SINGLE_TLV(name " Input 1 Volume", base + 1, \
		       WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
	SOC_SINGLE_TLV(name " Input 2 Volume", base + 3, \
		       WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
	SOC_SINGLE_TLV(name " Input 3 Volume", base + 5, \
		       WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
	SOC_SINGLE_TLV(name " Input 4 Volume", base + 7, \
		       WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv)

#define WM2200_MUX_ENUM_DECL(name, reg) \
	SOC_VALUE_ENUM_SINGLE_DECL(name, reg, 0, 0xff, \
				   wm2200_mixer_texts, wm2200_mixer_values)

#define WM2200_MUX_CTL_DECL(name) \
	const struct snd_kcontrol_new name##_mux = \
		SOC_DAPM_VALUE_ENUM("Route", name##_enum)

/* Declare the four input-select enums + controls for one mixer. */
#define WM2200_MIXER_ENUMS(name, base_reg) \
	static WM2200_MUX_ENUM_DECL(name##_in1_enum, base_reg); \
	static WM2200_MUX_ENUM_DECL(name##_in2_enum, base_reg + 2); \
	static WM2200_MUX_ENUM_DECL(name##_in3_enum, base_reg + 4); \
	static WM2200_MUX_ENUM_DECL(name##_in4_enum, base_reg + 6); \
	static WM2200_MUX_CTL_DECL(name##_in1); \
	static WM2200_MUX_CTL_DECL(name##_in2); \
	static WM2200_MUX_CTL_DECL(name##_in3); \
	static WM2200_MUX_CTL_DECL(name##_in4)

/* DSP aux inputs are source-only (no volume), packed one per register. */
#define WM2200_DSP_ENUMS(name, base_reg) \
	static WM2200_MUX_ENUM_DECL(name##_aux1_enum, base_reg); \
	static WM2200_MUX_ENUM_DECL(name##_aux2_enum, base_reg + 1); \
	static WM2200_MUX_ENUM_DECL(name##_aux3_enum, base_reg + 2); \
	static WM2200_MUX_ENUM_DECL(name##_aux4_enum, base_reg + 3); \
	static WM2200_MUX_ENUM_DECL(name##_aux5_enum, base_reg + 4); \
	static WM2200_MUX_ENUM_DECL(name##_aux6_enum, base_reg + 5); \
	static WM2200_MUX_CTL_DECL(name##_aux1); \
	static WM2200_MUX_CTL_DECL(name##_aux2); \
	static WM2200_MUX_CTL_DECL(name##_aux3); \
	static WM2200_MUX_CTL_DECL(name##_aux4); \
	static WM2200_MUX_CTL_DECL(name##_aux5); \
	static WM2200_MUX_CTL_DECL(name##_aux6);

static const char *wm2200_rxanc_input_sel_texts[] = {
	"None", "IN1", "IN2", "IN3",
};

static const struct soc_enum wm2200_rxanc_input_sel =
	SOC_ENUM_SINGLE(WM2200_RXANC_SRC,
			WM2200_IN_RXANC_SEL_SHIFT,
			ARRAY_SIZE(wm2200_rxanc_input_sel_texts),
			wm2200_rxanc_input_sel_texts);

/*
 * Userspace mixer controls.  NOTE(review): "Coefficeints" in the two
 * SND_SOC_BYTES names is misspelled, but control names are userspace
 * ABI — do not rename without coordination.
 */
static const struct snd_kcontrol_new wm2200_snd_controls[] = {
SOC_SINGLE("IN1 High Performance Switch", WM2200_IN1L_CONTROL,
	   WM2200_IN1_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN2 High Performance Switch", WM2200_IN2L_CONTROL,
	   WM2200_IN2_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN3 High Performance Switch", WM2200_IN3L_CONTROL,
	   WM2200_IN3_OSR_SHIFT, 1, 0),

SOC_DOUBLE_R_TLV("IN1 Volume", WM2200_IN1L_CONTROL, WM2200_IN1R_CONTROL,
		 WM2200_IN1L_PGA_VOL_SHIFT, 0x5f, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN2 Volume", WM2200_IN2L_CONTROL, WM2200_IN2R_CONTROL,
		 WM2200_IN2L_PGA_VOL_SHIFT, 0x5f, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL,
		 WM2200_IN3L_PGA_VOL_SHIFT, 0x5f, 0, in_tlv),

SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
	     WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_2L,
	     WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_3L,
	     WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1),

SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L,
		 WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_DIG_VOL_SHIFT,
		 0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN2 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_2L,
		 WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_DIG_VOL_SHIFT,
		 0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN3 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_3L,
		 WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_DIG_VOL_SHIFT,
		 0xbf, 0, digital_tlv),

SND_SOC_BYTES_MASK("EQL Coefficients", WM2200_EQL_1, 20, WM2200_EQL_ENA),
SND_SOC_BYTES_MASK("EQR Coefficients", WM2200_EQR_1, 20, WM2200_EQR_ENA),

SND_SOC_BYTES("LHPF1 Coefficeints", WM2200_HPLPF1_2, 1),
SND_SOC_BYTES("LHPF2 Coefficeints", WM2200_HPLPF2_2, 1),

SOC_SINGLE("OUT1 High Performance Switch", WM2200_DAC_DIGITAL_VOLUME_1L,
	   WM2200_OUT1_OSR_SHIFT, 1, 0),
SOC_SINGLE("OUT2 High Performance Switch", WM2200_DAC_DIGITAL_VOLUME_2L,
	   WM2200_OUT2_OSR_SHIFT, 1, 0),

SOC_DOUBLE_R("OUT1 Digital Switch", WM2200_DAC_DIGITAL_VOLUME_1L,
	     WM2200_DAC_DIGITAL_VOLUME_1R, WM2200_OUT1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R_TLV("OUT1 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_1L,
		 WM2200_DAC_DIGITAL_VOLUME_1R, WM2200_OUT1L_VOL_SHIFT, 0x9f, 0,
		 digital_tlv),
SOC_DOUBLE_R_TLV("OUT1 Volume", WM2200_DAC_VOLUME_LIMIT_1L,
		 WM2200_DAC_VOLUME_LIMIT_1R, WM2200_OUT1L_PGA_VOL_SHIFT,
		 0x46, 0, out_tlv),

SOC_DOUBLE_R("OUT2 Digital Switch", WM2200_DAC_DIGITAL_VOLUME_2L,
	     WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
		 WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0,
		 digital_tlv),
SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
	   WM2200_SPK1R_MUTE_SHIFT, 1, 1),
SOC_ENUM("RxANC Src", wm2200_rxanc_input_sel),
};

/* Instantiate input-select enums/controls for every mixer and DSP. */
WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(OUT1R, WM2200_OUT1RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(OUT2L, WM2200_OUT2LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(OUT2R, WM2200_OUT2RMIX_INPUT_1_SOURCE);

WM2200_MIXER_ENUMS(AIF1TX1, WM2200_AIF1TX1MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX2, WM2200_AIF1TX2MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX3, WM2200_AIF1TX3MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX4, WM2200_AIF1TX4MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX5, WM2200_AIF1TX5MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX6, WM2200_AIF1TX6MIX_INPUT_1_SOURCE);

WM2200_MIXER_ENUMS(EQL, WM2200_EQLMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(EQR, WM2200_EQRMIX_INPUT_1_SOURCE);

WM2200_MIXER_ENUMS(DSP1L, WM2200_DSP1LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP1R, WM2200_DSP1RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP2L, WM2200_DSP2LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP2R, WM2200_DSP2RMIX_INPUT_1_SOURCE);

WM2200_DSP_ENUMS(DSP1, WM2200_DSP1AUX1MIX_INPUT_1_SOURCE);
WM2200_DSP_ENUMS(DSP2, WM2200_DSP2AUX1MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(LHPF1, WM2200_LHPF1MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(LHPF2, WM2200_LHPF2MIX_INPUT_1_SOURCE);

#define WM2200_MUX(name, ctrl) \
	SND_SOC_DAPM_VALUE_MUX(name, SND_SOC_NOPM, 0, 0, ctrl)

/* Four input muxes feeding one virtual mixer widget. */
#define WM2200_MIXER_WIDGETS(name, name_str)	\
	WM2200_MUX(name_str " Input 1", &name##_in1_mux), \
	WM2200_MUX(name_str " Input 2", &name##_in2_mux), \
	WM2200_MUX(name_str " Input 3", &name##_in3_mux), \
	WM2200_MUX(name_str " Input 4", &name##_in4_mux), \
	SND_SOC_DAPM_MIXER(name_str " Mixer", SND_SOC_NOPM, 0, 0, NULL, 0)

/* A DSP gets L and R mixer widget sets plus six aux-input muxes. */
#define WM2200_DSP_WIDGETS(name, name_str) \
	WM2200_MIXER_WIDGETS(name##L, name_str "L"), \
	WM2200_MIXER_WIDGETS(name##R, name_str "R"), \
	WM2200_MUX(name_str " Aux 1", &name##_aux1_mux), \
	WM2200_MUX(name_str " Aux 2", &name##_aux2_mux), \
	WM2200_MUX(name_str " Aux 3", &name##_aux3_mux), \
	WM2200_MUX(name_str " Aux 4", &name##_aux4_mux), \
	WM2200_MUX(name_str " Aux 5", &name##_aux5_mux), \
	WM2200_MUX(name_str " Aux 6", &name##_aux6_mux)

/* DAPM routes from every selectable source into one input mux. */
#define WM2200_MIXER_INPUT_ROUTES(name)	\
	{ name, "Tone Generator", "Tone Generator" }, \
	{ name, "AEC Loopback", "AEC Loopback" }, \
	{ name, "IN1L", "IN1L PGA" }, \
	{ name, "IN1R", "IN1R PGA" }, \
	{ name, "IN2L", "IN2L PGA" }, \
	{ name, "IN2R", "IN2R PGA" }, \
	{ name, "IN3L", "IN3L PGA" }, \
	{ name, "IN3R", "IN3R PGA" }, \
	{ name, "DSP1.1", "DSP1" }, \
	{ name, "DSP1.2", "DSP1" }, \
	{ name, "DSP1.3", "DSP1" }, \
	{ name, "DSP1.4", "DSP1" }, \
	{ name, "DSP1.5", "DSP1" }, \
	{ name, "DSP1.6", "DSP1" }, \
	{ name, "DSP2.1", "DSP2" }, \
	{ name, "DSP2.2", "DSP2" }, \
	{ name, "DSP2.3", "DSP2" }, \
	{ name, "DSP2.4", "DSP2" }, \
	{ name, "DSP2.5", "DSP2" }, \
	{ name, "DSP2.6", "DSP2" }, \
	{ name, "AIF1RX1", "AIF1RX1" }, \
	{ name, "AIF1RX2", "AIF1RX2" }, \
	{ name, "AIF1RX3", "AIF1RX3" }, \
	{ name, "AIF1RX4", "AIF1RX4" }, \
	{ name, "AIF1RX5", "AIF1RX5" }, \
	{ name, "AIF1RX6", "AIF1RX6" }, \
	{ name, "EQL", "EQL" }, \
	{ name, "EQR", "EQR" }, \
	{ name, "LHPF1", "LHPF1" }, \
	{ name, "LHPF2", "LHPF2" }

#define WM2200_MIXER_ROUTES(widget, name) \
	{ widget, NULL, name " Mixer" }, \
	{ name " Mixer", NULL, name " Input 1" }, \
	{ name " Mixer", NULL, name " Input 2" }, \
	{ name " Mixer", NULL, name " Input 3" }, \
	{ name " Mixer", NULL, name " Input 4" }, \
	WM2200_MIXER_INPUT_ROUTES(name " Input 1"), \
	WM2200_MIXER_INPUT_ROUTES(name " Input 2"), \
	WM2200_MIXER_INPUT_ROUTES(name " Input 3"), \
	WM2200_MIXER_INPUT_ROUTES(name " Input 4")

#define WM2200_DSP_AUX_ROUTES(name) \
	{ name, NULL, name " Aux 1" }, \
	{ name, NULL, name " Aux 2" }, \
	{ name, NULL, name " Aux 3" }, \
	{ name, NULL, name " Aux 4" }, \
	{ name, NULL, name " Aux 5" }, \
	{ name, NULL, name " Aux 6" }, \
	WM2200_MIXER_INPUT_ROUTES(name " Aux 1"), \
	WM2200_MIXER_INPUT_ROUTES(name " Aux 2"), \
	WM2200_MIXER_INPUT_ROUTES(name " Aux 3"), \
	WM2200_MIXER_INPUT_ROUTES(name " Aux 4"), \
	WM2200_MIXER_INPUT_ROUTES(name " Aux 5"), \
	WM2200_MIXER_INPUT_ROUTES(name " Aux 6")

static const char *wm2200_aec_loopback_texts[] = {
	"OUT1L", "OUT1R", "OUT2L", "OUT2R",
};

static const struct soc_enum wm2200_aec_loopback =
	SOC_ENUM_SINGLE(WM2200_DAC_AEC_CONTROL_1,
			WM2200_AEC_LOOPBACK_SRC_SHIFT,
			ARRAY_SIZE(wm2200_aec_loopback_texts),
			wm2200_aec_loopback_texts);

static const struct snd_kcontrol_new wm2200_aec_loopback_mux =
	SOC_DAPM_ENUM("AEC Loopback", wm2200_aec_loopback);

static const struct snd_soc_dapm_widget wm2200_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", WM2200_CLOCKING_3, WM2200_SYSCLK_ENA_SHIFT, 0,
		    NULL, 0),
SND_SOC_DAPM_SUPPLY("CP1", WM2200_DM_CHARGE_PUMP_1, WM2200_CPDM_ENA_SHIFT, 0,
		    NULL, 0),
SND_SOC_DAPM_SUPPLY("CP2", WM2200_MIC_CHARGE_PUMP_1, WM2200_CPMIC_ENA_SHIFT, 0,
		    NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS1", WM2200_MIC_BIAS_CTRL_1, WM2200_MICB1_ENA_SHIFT,
		    0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", WM2200_MIC_BIAS_CTRL_2, WM2200_MICB2_ENA_SHIFT,
		    0, NULL, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("AVDD", 20, 0),

SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
SND_SOC_DAPM_INPUT("IN2L"),
SND_SOC_DAPM_INPUT("IN2R"),
SND_SOC_DAPM_INPUT("IN3L"),
SND_SOC_DAPM_INPUT("IN3R"),

SND_SOC_DAPM_SIGGEN("TONE"),
SND_SOC_DAPM_PGA("Tone Generator", WM2200_TONE_GENERATOR_1,
		 WM2200_TONE_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("IN1L PGA", WM2200_INPUT_ENABLES, WM2200_IN1L_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("IN1R PGA", WM2200_INPUT_ENABLES, WM2200_IN1R_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("IN2L PGA", WM2200_INPUT_ENABLES, WM2200_IN2L_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("IN2R PGA", WM2200_INPUT_ENABLES, WM2200_IN2R_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("IN3L PGA", WM2200_INPUT_ENABLES, WM2200_IN3L_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("IN3R PGA", WM2200_INPUT_ENABLES, WM2200_IN3R_ENA_SHIFT, 0,
		 NULL, 0),

SND_SOC_DAPM_AIF_IN("AIF1RX1", "Playback", 0,
		    WM2200_AUDIO_IF_1_22, WM2200_AIF1RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX2", "Playback", 1,
		    WM2200_AUDIO_IF_1_22, WM2200_AIF1RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX3", "Playback", 2,
		    WM2200_AUDIO_IF_1_22, WM2200_AIF1RX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX4", "Playback", 3,
		    WM2200_AUDIO_IF_1_22, WM2200_AIF1RX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX5", "Playback", 4,
		    WM2200_AUDIO_IF_1_22, WM2200_AIF1RX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX6", "Playback", 5,
		    WM2200_AUDIO_IF_1_22, WM2200_AIF1RX6_ENA_SHIFT, 0),

SND_SOC_DAPM_PGA("EQL", WM2200_EQL_1, WM2200_EQL_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQR", WM2200_EQR_1, WM2200_EQR_ENA_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("LHPF1", WM2200_HPLPF1_1, WM2200_LHPF1_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("LHPF2", WM2200_HPLPF2_1, WM2200_LHPF2_ENA_SHIFT, 0,
		 NULL, 0),

WM_ADSP1("DSP1", 0),
WM_ADSP1("DSP2", 1),

SND_SOC_DAPM_AIF_OUT("AIF1TX1", "Capture", 0,
		     WM2200_AUDIO_IF_1_22, WM2200_AIF1TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX2", "Capture", 1,
		     WM2200_AUDIO_IF_1_22, WM2200_AIF1TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX3", "Capture", 2,
		     WM2200_AUDIO_IF_1_22, WM2200_AIF1TX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX4", "Capture", 3,
		     WM2200_AUDIO_IF_1_22, WM2200_AIF1TX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX5", "Capture", 4,
		     WM2200_AUDIO_IF_1_22, WM2200_AIF1TX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX6", "Capture", 5,
		     WM2200_AUDIO_IF_1_22, WM2200_AIF1TX6_ENA_SHIFT, 0),

SND_SOC_DAPM_MUX("AEC Loopback", WM2200_DAC_AEC_CONTROL_1,
		 WM2200_AEC_LOOPBACK_ENA_SHIFT, 0, &wm2200_aec_loopback_mux),

/* Sequenced PGAs: OUT1 path and earpiece driver power-up ordering. */
SND_SOC_DAPM_PGA_S("OUT1L", 0, WM2200_OUTPUT_ENABLES,
		   WM2200_OUT1L_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("OUT1R", 0, WM2200_OUTPUT_ENABLES,
		   WM2200_OUT1R_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_LP", 1, WM2200_EAR_PIECE_CTRL_1,
		   WM2200_EPD_LP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_LP", 1, WM2200_EAR_PIECE_CTRL_1,
		   WM2200_EPD_OUTP_LP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_LP", 1, WM2200_EAR_PIECE_CTRL_1,
		   WM2200_EPD_RMV_SHRT_LP_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_LN", 1, WM2200_EAR_PIECE_CTRL_1,
		   WM2200_EPD_LN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_LN", 1, WM2200_EAR_PIECE_CTRL_1,
		   WM2200_EPD_OUTP_LN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_LN", 1, WM2200_EAR_PIECE_CTRL_1,
		   WM2200_EPD_RMV_SHRT_LN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RP", 1, WM2200_EAR_PIECE_CTRL_2,
		   WM2200_EPD_RP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_RP", 1, WM2200_EAR_PIECE_CTRL_2,
		   WM2200_EPD_OUTP_RP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_RP", 1, WM2200_EAR_PIECE_CTRL_2,
		   WM2200_EPD_RMV_SHRT_RP_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RN", 1, WM2200_EAR_PIECE_CTRL_2,
		   WM2200_EPD_RN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_RN", 1, WM2200_EAR_PIECE_CTRL_2,
		   WM2200_EPD_OUTP_RN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_RN", 1, WM2200_EAR_PIECE_CTRL_2,
		   WM2200_EPD_RMV_SHRT_RN_SHIFT, 0, NULL, 0),

SND_SOC_DAPM_PGA("OUT2L", WM2200_OUTPUT_ENABLES, WM2200_OUT2L_ENA_SHIFT,
		 0, NULL, 0),
SND_SOC_DAPM_PGA("OUT2R", WM2200_OUTPUT_ENABLES, WM2200_OUT2R_ENA_SHIFT,
		 0, NULL,
0), SND_SOC_DAPM_OUTPUT("EPOUTLN"), SND_SOC_DAPM_OUTPUT("EPOUTLP"), SND_SOC_DAPM_OUTPUT("EPOUTRN"), SND_SOC_DAPM_OUTPUT("EPOUTRP"), SND_SOC_DAPM_OUTPUT("SPK"), WM2200_MIXER_WIDGETS(EQL, "EQL"), WM2200_MIXER_WIDGETS(EQR, "EQR"), WM2200_MIXER_WIDGETS(LHPF1, "LHPF1"), WM2200_MIXER_WIDGETS(LHPF2, "LHPF2"), WM2200_DSP_WIDGETS(DSP1, "DSP1"), WM2200_DSP_WIDGETS(DSP2, "DSP2"), WM2200_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"), WM2200_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"), WM2200_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"), WM2200_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"), WM2200_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"), WM2200_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"), WM2200_MIXER_WIDGETS(OUT1L, "OUT1L"), WM2200_MIXER_WIDGETS(OUT1R, "OUT1R"), WM2200_MIXER_WIDGETS(OUT2L, "OUT2L"), WM2200_MIXER_WIDGETS(OUT2R, "OUT2R"), }; static const struct snd_soc_dapm_route wm2200_dapm_routes[] = { /* Everything needs SYSCLK but only hook up things on the edge * of the chip */ { "IN1L", NULL, "SYSCLK" }, { "IN1R", NULL, "SYSCLK" }, { "IN2L", NULL, "SYSCLK" }, { "IN2R", NULL, "SYSCLK" }, { "IN3L", NULL, "SYSCLK" }, { "IN3R", NULL, "SYSCLK" }, { "OUT1L", NULL, "SYSCLK" }, { "OUT1R", NULL, "SYSCLK" }, { "OUT2L", NULL, "SYSCLK" }, { "OUT2R", NULL, "SYSCLK" }, { "AIF1RX1", NULL, "SYSCLK" }, { "AIF1RX2", NULL, "SYSCLK" }, { "AIF1RX3", NULL, "SYSCLK" }, { "AIF1RX4", NULL, "SYSCLK" }, { "AIF1RX5", NULL, "SYSCLK" }, { "AIF1RX6", NULL, "SYSCLK" }, { "AIF1TX1", NULL, "SYSCLK" }, { "AIF1TX2", NULL, "SYSCLK" }, { "AIF1TX3", NULL, "SYSCLK" }, { "AIF1TX4", NULL, "SYSCLK" }, { "AIF1TX5", NULL, "SYSCLK" }, { "AIF1TX6", NULL, "SYSCLK" }, { "IN1L", NULL, "AVDD" }, { "IN1R", NULL, "AVDD" }, { "IN2L", NULL, "AVDD" }, { "IN2R", NULL, "AVDD" }, { "IN3L", NULL, "AVDD" }, { "IN3R", NULL, "AVDD" }, { "OUT1L", NULL, "AVDD" }, { "OUT1R", NULL, "AVDD" }, { "IN1L PGA", NULL, "IN1L" }, { "IN1R PGA", NULL, "IN1R" }, { "IN2L PGA", NULL, "IN2L" }, { "IN2R PGA", NULL, "IN2R" }, { "IN3L PGA", NULL, "IN3L" }, { "IN3R PGA", NULL, "IN3R" }, { "Tone Generator", NULL, 
"TONE" }, { "CP2", NULL, "CPVDD" }, { "MICBIAS1", NULL, "CP2" }, { "MICBIAS2", NULL, "CP2" }, { "CP1", NULL, "CPVDD" }, { "EPD_LN", NULL, "CP1" }, { "EPD_LP", NULL, "CP1" }, { "EPD_RN", NULL, "CP1" }, { "EPD_RP", NULL, "CP1" }, { "EPD_LP", NULL, "OUT1L" }, { "EPD_OUTP_LP", NULL, "EPD_LP" }, { "EPD_RMV_SHRT_LP", NULL, "EPD_OUTP_LP" }, { "EPOUTLP", NULL, "EPD_RMV_SHRT_LP" }, { "EPD_LN", NULL, "OUT1L" }, { "EPD_OUTP_LN", NULL, "EPD_LN" }, { "EPD_RMV_SHRT_LN", NULL, "EPD_OUTP_LN" }, { "EPOUTLN", NULL, "EPD_RMV_SHRT_LN" }, { "EPD_RP", NULL, "OUT1R" }, { "EPD_OUTP_RP", NULL, "EPD_RP" }, { "EPD_RMV_SHRT_RP", NULL, "EPD_OUTP_RP" }, { "EPOUTRP", NULL, "EPD_RMV_SHRT_RP" }, { "EPD_RN", NULL, "OUT1R" }, { "EPD_OUTP_RN", NULL, "EPD_RN" }, { "EPD_RMV_SHRT_RN", NULL, "EPD_OUTP_RN" }, { "EPOUTRN", NULL, "EPD_RMV_SHRT_RN" }, { "SPK", NULL, "OUT2L" }, { "SPK", NULL, "OUT2R" }, { "AEC Loopback", "OUT1L", "OUT1L" }, { "AEC Loopback", "OUT1R", "OUT1R" }, { "AEC Loopback", "OUT2L", "OUT2L" }, { "AEC Loopback", "OUT2R", "OUT2R" }, WM2200_MIXER_ROUTES("DSP1", "DSP1L"), WM2200_MIXER_ROUTES("DSP1", "DSP1R"), WM2200_MIXER_ROUTES("DSP2", "DSP2L"), WM2200_MIXER_ROUTES("DSP2", "DSP2R"), WM2200_DSP_AUX_ROUTES("DSP1"), WM2200_DSP_AUX_ROUTES("DSP2"), WM2200_MIXER_ROUTES("OUT1L", "OUT1L"), WM2200_MIXER_ROUTES("OUT1R", "OUT1R"), WM2200_MIXER_ROUTES("OUT2L", "OUT2L"), WM2200_MIXER_ROUTES("OUT2R", "OUT2R"), WM2200_MIXER_ROUTES("AIF1TX1", "AIF1TX1"), WM2200_MIXER_ROUTES("AIF1TX2", "AIF1TX2"), WM2200_MIXER_ROUTES("AIF1TX3", "AIF1TX3"), WM2200_MIXER_ROUTES("AIF1TX4", "AIF1TX4"), WM2200_MIXER_ROUTES("AIF1TX5", "AIF1TX5"), WM2200_MIXER_ROUTES("AIF1TX6", "AIF1TX6"), WM2200_MIXER_ROUTES("EQL", "EQL"), WM2200_MIXER_ROUTES("EQR", "EQR"), WM2200_MIXER_ROUTES("LHPF1", "LHPF1"), WM2200_MIXER_ROUTES("LHPF2", "LHPF2"), }; static int wm2200_probe(struct snd_soc_codec *codec) { struct wm2200_priv *wm2200 = dev_get_drvdata(codec->dev); int ret; wm2200->codec = codec; codec->control_data = wm2200->regmap; 
codec->dapm.bias_level = SND_SOC_BIAS_OFF; ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } ret = snd_soc_add_codec_controls(codec, wm_adsp1_fw_controls, 2); if (ret != 0) return ret; return ret; } static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; int lrclk, bclk, fmt_val; lrclk = 0; bclk = 0; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: fmt_val = 0; break; case SND_SOC_DAIFMT_I2S: fmt_val = 2; break; default: dev_err(codec->dev, "Unsupported DAI format %d\n", fmt & SND_SOC_DAIFMT_FORMAT_MASK); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: break; case SND_SOC_DAIFMT_CBS_CFM: lrclk |= WM2200_AIF1TX_LRCLK_MSTR; break; case SND_SOC_DAIFMT_CBM_CFS: bclk |= WM2200_AIF1_BCLK_MSTR; break; case SND_SOC_DAIFMT_CBM_CFM: lrclk |= WM2200_AIF1TX_LRCLK_MSTR; bclk |= WM2200_AIF1_BCLK_MSTR; break; default: dev_err(codec->dev, "Unsupported master mode %d\n", fmt & SND_SOC_DAIFMT_MASTER_MASK); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: bclk |= WM2200_AIF1_BCLK_INV; lrclk |= WM2200_AIF1TX_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: bclk |= WM2200_AIF1_BCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: lrclk |= WM2200_AIF1TX_LRCLK_INV; break; default: return -EINVAL; } snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_1, WM2200_AIF1_BCLK_MSTR | WM2200_AIF1_BCLK_INV, bclk); snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_2, WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV, lrclk); snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_3, WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV, lrclk); snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5, WM2200_AIF1_FMT_MASK, fmt_val); return 0; } static int wm2200_sr_code[] = { 0, 12000, 24000, 48000, 96000, 192000, 384000, 768000, 0, 11025, 22050, 44100, 
	88200,
	176400,
	352800,
	705600,
	4000,
	8000,
	16000,
	32000,
	64000,
	128000,
	256000,
	512000,
};

#define WM2200_NUM_BCLK_RATES 12

/* BCLK rates usable when SYSCLK is a multiple of 4kHz (48kHz family) */
static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
	6144000,
	3072000,
	2048000,
	1536000,
	768000,
	512000,
	384000,
	256000,
	192000,
	128000,
	96000,
	64000,
};

/* BCLK rates usable for 44.1kHz family SYSCLKs (SYSCLK % 4000 != 0) */
static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
	5644800,
	3763200,
	2882400,
	1881600,
	1411200,
	705600,
	470400,
	352800,
	176400,
	117600,
	88200,
	58800,
};

/*
 * Configure the AIF1 clocking and word sizes for the requested stream
 * parameters; requires a SYSCLK rate to have been set beforehand.
 */
static int wm2200_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
	int i, bclk, lrclk, wl, fl, sr_code;
	int *bclk_rates;

	/* Data sizes if not using TDM */
	wl = snd_pcm_format_width(params_format(params));
	if (wl < 0)
		return wl;
	fl = snd_soc_params_to_frame_size(params);
	if (fl < 0)
		return fl;

	dev_dbg(codec->dev, "Word length %d bits, frame length %d bits\n",
		wl, fl);

	/* Target BCLK rate */
	bclk = snd_soc_params_to_bclk(params);
	if (bclk < 0)
		return bclk;

	if (!wm2200->sysclk) {
		dev_err(codec->dev, "SYSCLK has no rate set\n");
		return -EINVAL;
	}

	/* Map the requested sample rate onto its register code */
	for (i = 0; i < ARRAY_SIZE(wm2200_sr_code); i++)
		if (wm2200_sr_code[i] == params_rate(params))
			break;
	if (i == ARRAY_SIZE(wm2200_sr_code)) {
		dev_err(codec->dev, "Unsupported sample rate: %dHz\n",
			params_rate(params));
		return -EINVAL;
	}
	sr_code = i;

	dev_dbg(codec->dev, "Target BCLK is %dHz, using %dHz SYSCLK\n",
		bclk, wm2200->sysclk);

	if (wm2200->sysclk % 4000)
		bclk_rates = wm2200_bclk_rates_cd;
	else
		bclk_rates = wm2200_bclk_rates_dat;

	/* Pick a supported BCLK that is a whole multiple of the target */
	for (i = 0; i < WM2200_NUM_BCLK_RATES; i++)
		if (bclk_rates[i] >= bclk && (bclk_rates[i] % bclk == 0))
			break;
	if (i == WM2200_NUM_BCLK_RATES) {
		dev_err(codec->dev,
			"No valid BCLK for %dHz found from %dHz SYSCLK\n",
			bclk, wm2200->sysclk);
		return -EINVAL;
	}

	/* From here on bclk is the table index, not the rate */
	bclk = i;
	dev_dbg(codec->dev, "Setting %dHz BCLK\n", bclk_rates[bclk]);
	snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_1,
			    WM2200_AIF1_BCLK_DIV_MASK,
			    bclk);

	/* BCLKs per LRCLK frame */
	lrclk = bclk_rates[bclk] / params_rate(params);
	dev_dbg(codec->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
	    dai->symmetric_rates)
		snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_7,
				    WM2200_AIF1RX_BCPF_MASK, lrclk);
	else
		snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_6,
				    WM2200_AIF1TX_BCPF_MASK, lrclk);

	/* Use the sample width for both the slot length and word length */
	i = (wl << WM2200_AIF1TX_WL_SHIFT) | wl;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_9,
				    WM2200_AIF1RX_WL_MASK |
				    WM2200_AIF1RX_SLOT_LEN_MASK, i);
	else
		snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_8,
				    WM2200_AIF1TX_WL_MASK |
				    WM2200_AIF1TX_SLOT_LEN_MASK, i);

	snd_soc_update_bits(codec, WM2200_CLOCKING_4,
			    WM2200_SAMPLE_RATE_1_MASK, sr_code);

	return 0;
}

static const struct snd_soc_dai_ops wm2200_dai_ops = {
	.set_fmt = wm2200_set_fmt,
	.hw_params = wm2200_hw_params,
};

/*
 * Select the SYSCLK source and rate.  Only the two audio master clock
 * rates (22.5792MHz and 24.576MHz) are accepted.
 */
static int wm2200_set_sysclk(struct snd_soc_codec *codec, int clk_id,
			     int source, unsigned int freq, int dir)
{
	struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
	int fval;

	switch (clk_id) {
	case WM2200_CLK_SYSCLK:
		break;

	default:
		dev_err(codec->dev, "Unknown clock %d\n", clk_id);
		return -EINVAL;
	}

	switch (source) {
	case WM2200_CLKSRC_MCLK1:
	case WM2200_CLKSRC_MCLK2:
	case WM2200_CLKSRC_FLL:
	case WM2200_CLKSRC_BCLK1:
		break;
	default:
		dev_err(codec->dev, "Invalid source %d\n", source);
		return -EINVAL;
	}

	switch (freq) {
	case 22579200:
	case 24576000:
		fval = 2;
		break;
	default:
		dev_err(codec->dev, "Invalid clock rate: %d\n", freq);
		return -EINVAL;
	}

	/* TODO: Check if MCLKs are in use and enable/disable pulls to
	 * match.
	 */

	snd_soc_update_bits(codec, WM2200_CLOCKING_3, WM2200_SYSCLK_FREQ_MASK |
			    WM2200_SYSCLK_SRC_MASK,
			    fval << WM2200_SYSCLK_FREQ_SHIFT | source);

	wm2200->sysclk = freq;

	return 0;
}

/* FLL configuration register fields computed by fll_factors() */
struct _fll_div {
	u16 fll_fratio;
	u16 fll_outdiv;
	u16 fll_refclk_div;
	u16 n;
	u16 theta;
	u16 lambda;
};

/* Reference frequency bands and the FRATIO register value/ratio to use */
static struct {
	unsigned int min;
	unsigned int max;
	u16 fll_fratio;
	int ratio;
} fll_fratios[] = {
	{       0,    64000, 4, 16 },
	{   64000,   128000, 3,  8 },
	{  128000,   256000, 2,  4 },
	{  256000,  1000000, 1,  2 },
	{ 1000000, 13500000, 0,  1 },
};

/*
 * Compute the FLL divider settings needed to generate Fout from Fref,
 * filling in *fll_div.  Returns 0 on success or -EINVAL if no valid
 * configuration exists for the requested rates.
 */
static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
		       unsigned int Fout)
{
	unsigned int target;
	unsigned int div;
	unsigned int fratio, gcd_fll;
	int i;

	/* Fref must be <=13.5MHz */
	div = 1;
	fll_div->fll_refclk_div = 0;
	while ((Fref / div) > 13500000) {
		div *= 2;
		fll_div->fll_refclk_div++;

		if (div > 8) {
			pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
			       Fref);
			return -EINVAL;
		}
	}

	pr_debug("FLL Fref=%u Fout=%u\n", Fref, Fout);

	/* Apply the division for our remaining calculations */
	Fref /= div;

	/* Fvco should be 90-100MHz; don't check the upper bound */
	div = 2;
	while (Fout * div < 90000000) {
		div++;
		if (div > 64) {
			pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
			       Fout);
			return -EINVAL;
		}
	}
	target = Fout * div;
	fll_div->fll_outdiv = div - 1;

	pr_debug("FLL Fvco=%dHz\n", target);

	/* Find an appropriate FLL_FRATIO and factor it out of the target */
	for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
		if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
			fll_div->fll_fratio = fll_fratios[i].fll_fratio;
			fratio = fll_fratios[i].ratio;
			break;
		}
	}
	if (i == ARRAY_SIZE(fll_fratios)) {
		pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
		return -EINVAL;
	}

	/* Integer part, then fractional part as theta/lambda */
	fll_div->n = target / (fratio * Fref);

	if (target % Fref == 0) {
		fll_div->theta = 0;
		fll_div->lambda = 0;
	} else {
		gcd_fll = gcd(target, fratio * Fref);

		fll_div->theta = (target - (fll_div->n * fratio * Fref))
			/ gcd_fll;
		fll_div->lambda = (fratio * Fref) / gcd_fll;
	}

	pr_debug("FLL N=%x THETA=%x LAMBDA=%x\n",
		 fll_div->n, fll_div->theta, fll_div->lambda);
	pr_debug("FLL_FRATIO=%x(%d) FLL_OUTDIV=%x FLL_REFCLK_DIV=%x\n",
		 fll_div->fll_fratio, fratio, fll_div->fll_outdiv,
		 fll_div->fll_refclk_div);

	return 0;
}

/*
 * Configure and enable (Fout != 0) or disable (Fout == 0) the FLL,
 * holding a runtime PM reference while it is running.
 */
static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
			  unsigned int Fref, unsigned int Fout)
{
	struct i2c_client *i2c = to_i2c_client(codec->dev);
	struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
	struct _fll_div factors;
	int ret, i, timeout;

	if (!Fout) {
		dev_dbg(codec->dev, "FLL disabled");

		if (wm2200->fll_fout)
			pm_runtime_put(codec->dev);

		wm2200->fll_fout = 0;
		snd_soc_update_bits(codec, WM2200_FLL_CONTROL_1,
				    WM2200_FLL_ENA, 0);
		return 0;
	}

	switch (source) {
	case WM2200_FLL_SRC_MCLK1:
	case WM2200_FLL_SRC_MCLK2:
	case WM2200_FLL_SRC_BCLK:
		break;
	default:
		dev_err(codec->dev, "Invalid FLL source %d\n", source);
		return -EINVAL;
	}

	ret = fll_factors(&factors, Fref, Fout);
	if (ret < 0)
		return ret;

	/* Disable the FLL while we reconfigure */
	snd_soc_update_bits(codec, WM2200_FLL_CONTROL_1, WM2200_FLL_ENA, 0);

	snd_soc_update_bits(codec, WM2200_FLL_CONTROL_2,
			    WM2200_FLL_OUTDIV_MASK | WM2200_FLL_FRATIO_MASK,
			    (factors.fll_outdiv << WM2200_FLL_OUTDIV_SHIFT) |
			    factors.fll_fratio);
	/* Fractional mode only needed when there is a remainder */
	if (factors.theta) {
		snd_soc_update_bits(codec, WM2200_FLL_CONTROL_3,
				    WM2200_FLL_FRACN_ENA,
				    WM2200_FLL_FRACN_ENA);
		snd_soc_update_bits(codec, WM2200_FLL_EFS_2,
				    WM2200_FLL_EFS_ENA,
				    WM2200_FLL_EFS_ENA);
	} else {
		snd_soc_update_bits(codec, WM2200_FLL_CONTROL_3,
				    WM2200_FLL_FRACN_ENA, 0);
		snd_soc_update_bits(codec, WM2200_FLL_EFS_2,
				    WM2200_FLL_EFS_ENA, 0);
	}
	snd_soc_update_bits(codec, WM2200_FLL_CONTROL_4, WM2200_FLL_THETA_MASK,
			    factors.theta);
	snd_soc_update_bits(codec, WM2200_FLL_CONTROL_6, WM2200_FLL_N_MASK,
			    factors.n);
	snd_soc_update_bits(codec, WM2200_FLL_CONTROL_7,
			    WM2200_FLL_CLK_REF_DIV_MASK |
			    WM2200_FLL_CLK_REF_SRC_MASK,
			    (factors.fll_refclk_div
			     << WM2200_FLL_CLK_REF_DIV_SHIFT) | source);
	snd_soc_update_bits(codec, WM2200_FLL_EFS_1,
			    WM2200_FLL_LAMBDA_MASK,
factors.lambda); /* Clear any pending completions */ try_wait_for_completion(&wm2200->fll_lock); pm_runtime_get_sync(codec->dev); snd_soc_update_bits(codec, WM2200_FLL_CONTROL_1, WM2200_FLL_ENA, WM2200_FLL_ENA); if (i2c->irq) timeout = 2; else timeout = 50; snd_soc_update_bits(codec, WM2200_CLOCKING_3, WM2200_SYSCLK_ENA, WM2200_SYSCLK_ENA); /* Poll for the lock; will use the interrupt to exit quickly */ for (i = 0; i < timeout; i++) { if (i2c->irq) { ret = wait_for_completion_timeout(&wm2200->fll_lock, msecs_to_jiffies(25)); if (ret > 0) break; } else { msleep(1); } ret = snd_soc_read(codec, WM2200_INTERRUPT_RAW_STATUS_2); if (ret < 0) { dev_err(codec->dev, "Failed to read FLL status: %d\n", ret); continue; } if (ret & WM2200_FLL_LOCK_STS) break; } if (i == timeout) { dev_err(codec->dev, "FLL lock timed out\n"); pm_runtime_put(codec->dev); return -ETIMEDOUT; } wm2200->fll_src = source; wm2200->fll_fref = Fref; wm2200->fll_fout = Fout; dev_dbg(codec->dev, "FLL running %dHz->%dHz\n", Fref, Fout); return 0; } static int wm2200_dai_probe(struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; unsigned int val = 0; int ret; ret = snd_soc_read(codec, WM2200_GPIO_CTRL_1); if (ret >= 0) { if ((ret & WM2200_GP1_FN_MASK) != 0) { dai->symmetric_rates = true; val = WM2200_AIF1TX_LRCLK_SRC; } } else { dev_err(codec->dev, "Failed to read GPIO 1 config: %d\n", ret); } snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_2, WM2200_AIF1TX_LRCLK_SRC, val); return 0; } #define WM2200_RATES SNDRV_PCM_RATE_8000_48000 #define WM2200_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver wm2200_dai = { .name = "wm2200", .probe = wm2200_dai_probe, .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = WM2200_RATES, .formats = WM2200_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 2, .channels_max = 2, .rates = WM2200_RATES, .formats = 
WM2200_FORMATS, }, .ops = &wm2200_dai_ops, }; static struct snd_soc_codec_driver soc_codec_wm2200 = { .probe = wm2200_probe, .idle_bias_off = true, .ignore_pmdown_time = true, .set_sysclk = wm2200_set_sysclk, .set_pll = wm2200_set_fll, .controls = wm2200_snd_controls, .num_controls = ARRAY_SIZE(wm2200_snd_controls), .dapm_widgets = wm2200_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm2200_dapm_widgets), .dapm_routes = wm2200_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm2200_dapm_routes), }; static irqreturn_t wm2200_irq(int irq, void *data) { struct wm2200_priv *wm2200 = data; unsigned int val, mask; int ret; ret = regmap_read(wm2200->regmap, WM2200_INTERRUPT_STATUS_2, &val); if (ret != 0) { dev_err(wm2200->dev, "Failed to read IRQ status: %d\n", ret); return IRQ_NONE; } ret = regmap_read(wm2200->regmap, WM2200_INTERRUPT_STATUS_2_MASK, &mask); if (ret != 0) { dev_warn(wm2200->dev, "Failed to read IRQ mask: %d\n", ret); mask = 0; } val &= ~mask; if (val & WM2200_FLL_LOCK_EINT) { dev_dbg(wm2200->dev, "FLL locked\n"); complete(&wm2200->fll_lock); } if (val) { regmap_write(wm2200->regmap, WM2200_INTERRUPT_STATUS_2, val); return IRQ_HANDLED; } else { return IRQ_NONE; } } static const struct regmap_config wm2200_regmap = { .reg_bits = 16, .val_bits = 16, .max_register = WM2200_MAX_REGISTER + (ARRAY_SIZE(wm2200_ranges) * WM2200_DSP_SPACING), .reg_defaults = wm2200_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm2200_reg_defaults), .volatile_reg = wm2200_volatile_register, .readable_reg = wm2200_readable_register, .cache_type = REGCACHE_RBTREE, .ranges = wm2200_ranges, .num_ranges = ARRAY_SIZE(wm2200_ranges), }; static const unsigned int wm2200_dig_vu[] = { WM2200_DAC_DIGITAL_VOLUME_1L, WM2200_DAC_DIGITAL_VOLUME_1R, WM2200_DAC_DIGITAL_VOLUME_2L, WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_ADC_DIGITAL_VOLUME_1L, WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_ADC_DIGITAL_VOLUME_2L, WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_ADC_DIGITAL_VOLUME_3L, WM2200_ADC_DIGITAL_VOLUME_3R, }; static const 
unsigned int wm2200_mic_ctrl_reg[] = { WM2200_IN1L_CONTROL, WM2200_IN2L_CONTROL, WM2200_IN3L_CONTROL, }; static int wm2200_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm2200_pdata *pdata = dev_get_platdata(&i2c->dev); struct wm2200_priv *wm2200; unsigned int reg; int ret, i; int val; wm2200 = devm_kzalloc(&i2c->dev, sizeof(struct wm2200_priv), GFP_KERNEL); if (wm2200 == NULL) return -ENOMEM; wm2200->dev = &i2c->dev; init_completion(&wm2200->fll_lock); wm2200->regmap = devm_regmap_init_i2c(i2c, &wm2200_regmap); if (IS_ERR(wm2200->regmap)) { ret = PTR_ERR(wm2200->regmap); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); return ret; } for (i = 0; i < 2; i++) { wm2200->dsp[i].type = WMFW_ADSP1; wm2200->dsp[i].part = "wm2200"; wm2200->dsp[i].num = i + 1; wm2200->dsp[i].dev = &i2c->dev; wm2200->dsp[i].regmap = wm2200->regmap; wm2200->dsp[i].sysclk_reg = WM2200_CLOCKING_3; wm2200->dsp[i].sysclk_mask = WM2200_SYSCLK_FREQ_MASK; wm2200->dsp[i].sysclk_shift = WM2200_SYSCLK_FREQ_SHIFT; } wm2200->dsp[0].base = WM2200_DSP1_CONTROL_1; wm2200->dsp[0].mem = wm2200_dsp1_regions; wm2200->dsp[0].num_mems = ARRAY_SIZE(wm2200_dsp1_regions); wm2200->dsp[1].base = WM2200_DSP2_CONTROL_1; wm2200->dsp[1].mem = wm2200_dsp2_regions; wm2200->dsp[1].num_mems = ARRAY_SIZE(wm2200_dsp2_regions); for (i = 0; i < ARRAY_SIZE(wm2200->dsp); i++) wm_adsp1_init(&wm2200->dsp[i]); if (pdata) wm2200->pdata = *pdata; i2c_set_clientdata(i2c, wm2200); for (i = 0; i < ARRAY_SIZE(wm2200->core_supplies); i++) wm2200->core_supplies[i].supply = wm2200_core_supply_names[i]; ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm2200->core_supplies), wm2200->core_supplies); if (ret != 0) { dev_err(&i2c->dev, "Failed to request core supplies: %d\n", ret); return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(wm2200->core_supplies), wm2200->core_supplies); if (ret != 0) { dev_err(&i2c->dev, "Failed to enable core supplies: %d\n", ret); return ret; } if 
(wm2200->pdata.ldo_ena) { ret = devm_gpio_request_one(&i2c->dev, wm2200->pdata.ldo_ena, GPIOF_OUT_INIT_HIGH, "WM2200 LDOENA"); if (ret < 0) { dev_err(&i2c->dev, "Failed to request LDOENA %d: %d\n", wm2200->pdata.ldo_ena, ret); goto err_enable; } msleep(2); } if (wm2200->pdata.reset) { ret = devm_gpio_request_one(&i2c->dev, wm2200->pdata.reset, GPIOF_OUT_INIT_HIGH, "WM2200 /RESET"); if (ret < 0) { dev_err(&i2c->dev, "Failed to request /RESET %d: %d\n", wm2200->pdata.reset, ret); goto err_ldo; } } ret = regmap_read(wm2200->regmap, WM2200_SOFTWARE_RESET, &reg); if (ret < 0) { dev_err(&i2c->dev, "Failed to read ID register: %d\n", ret); goto err_reset; } switch (reg) { case 0x2200: break; default: dev_err(&i2c->dev, "Device is not a WM2200, ID is %x\n", reg); ret = -EINVAL; goto err_reset; } ret = regmap_read(wm2200->regmap, WM2200_DEVICE_REVISION, &reg); if (ret < 0) { dev_err(&i2c->dev, "Failed to read revision register\n"); goto err_reset; } wm2200->rev = reg & WM2200_DEVICE_REVISION_MASK; dev_info(&i2c->dev, "revision %c\n", wm2200->rev + 'A'); switch (wm2200->rev) { case 0: case 1: ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch, ARRAY_SIZE(wm2200_reva_patch)); if (ret != 0) { dev_err(&i2c->dev, "Failed to register patch: %d\n", ret); } break; default: break; } ret = wm2200_reset(wm2200); if (ret < 0) { dev_err(&i2c->dev, "Failed to issue reset\n"); goto err_reset; } for (i = 0; i < ARRAY_SIZE(wm2200->pdata.gpio_defaults); i++) { if (!wm2200->pdata.gpio_defaults[i]) continue; regmap_write(wm2200->regmap, WM2200_GPIO_CTRL_1 + i, wm2200->pdata.gpio_defaults[i]); } for (i = 0; i < ARRAY_SIZE(wm2200_dig_vu); i++) regmap_update_bits(wm2200->regmap, wm2200_dig_vu[i], WM2200_OUT_VU, WM2200_OUT_VU); /* Assign slots 1-6 to channels 1-6 for both TX and RX */ for (i = 0; i < 6; i++) { regmap_write(wm2200->regmap, WM2200_AUDIO_IF_1_10 + i, i); regmap_write(wm2200->regmap, WM2200_AUDIO_IF_1_16 + i, i); } for (i = 0; i < WM2200_MAX_MICBIAS; i++) { if 
(!wm2200->pdata.micbias[i].mb_lvl && !wm2200->pdata.micbias[i].bypass) continue; /* Apply default for bypass mode */ if (!wm2200->pdata.micbias[i].mb_lvl) wm2200->pdata.micbias[i].mb_lvl = WM2200_MBIAS_LVL_1V5; val = (wm2200->pdata.micbias[i].mb_lvl -1) << WM2200_MICB1_LVL_SHIFT; if (wm2200->pdata.micbias[i].discharge) val |= WM2200_MICB1_DISCH; if (wm2200->pdata.micbias[i].fast_start) val |= WM2200_MICB1_RATE; if (wm2200->pdata.micbias[i].bypass) val |= WM2200_MICB1_MODE; regmap_update_bits(wm2200->regmap, WM2200_MIC_BIAS_CTRL_1 + i, WM2200_MICB1_LVL_MASK | WM2200_MICB1_DISCH | WM2200_MICB1_MODE | WM2200_MICB1_RATE, val); } for (i = 0; i < ARRAY_SIZE(wm2200->pdata.in_mode); i++) { regmap_update_bits(wm2200->regmap, wm2200_mic_ctrl_reg[i], WM2200_IN1_MODE_MASK | WM2200_IN1_DMIC_SUP_MASK, (wm2200->pdata.in_mode[i] << WM2200_IN1_MODE_SHIFT) | (wm2200->pdata.dmic_sup[i] << WM2200_IN1_DMIC_SUP_SHIFT)); } if (i2c->irq) { ret = request_threaded_irq(i2c->irq, NULL, wm2200_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "wm2200", wm2200); if (ret == 0) regmap_update_bits(wm2200->regmap, WM2200_INTERRUPT_STATUS_2_MASK, WM2200_FLL_LOCK_EINT, 0); else dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", i2c->irq, ret); } pm_runtime_set_active(&i2c->dev); pm_runtime_enable(&i2c->dev); pm_request_idle(&i2c->dev); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_wm2200, &wm2200_dai, 1); if (ret != 0) { dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret); goto err_pm_runtime; } return 0; err_pm_runtime: pm_runtime_disable(&i2c->dev); err_reset: if (wm2200->pdata.reset) gpio_set_value_cansleep(wm2200->pdata.reset, 0); err_ldo: if (wm2200->pdata.ldo_ena) gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0); err_enable: regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies), wm2200->core_supplies); return ret; } static int wm2200_i2c_remove(struct i2c_client *i2c) { struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c); snd_soc_unregister_codec(&i2c->dev); if (i2c->irq) 
		free_irq(i2c->irq, wm2200);
	if (wm2200->pdata.reset)
		gpio_set_value_cansleep(wm2200->pdata.reset, 0);
	if (wm2200->pdata.ldo_ena)
		gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);

	return 0;
}

#ifdef CONFIG_PM_RUNTIME
/* Power down: route register writes to the cache, drop LDO and supplies */
static int wm2200_runtime_suspend(struct device *dev)
{
	struct wm2200_priv *wm2200 = dev_get_drvdata(dev);

	regcache_cache_only(wm2200->regmap, true);
	regcache_mark_dirty(wm2200->regmap);
	if (wm2200->pdata.ldo_ena)
		gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
	regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
			       wm2200->core_supplies);

	return 0;
}

/* Power up: re-enable supplies/LDO and replay the cached registers */
static int wm2200_runtime_resume(struct device *dev)
{
	struct wm2200_priv *wm2200 = dev_get_drvdata(dev);
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(wm2200->core_supplies),
				    wm2200->core_supplies);
	if (ret != 0) {
		dev_err(dev, "Failed to enable supplies: %d\n",
			ret);
		return ret;
	}

	if (wm2200->pdata.ldo_ena) {
		gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 1);
		/* Allow the LDO to come up before touching the chip */
		msleep(2);
	}

	regcache_cache_only(wm2200->regmap, false);
	regcache_sync(wm2200->regmap);

	return 0;
}
#endif

static struct dev_pm_ops wm2200_pm = {
	SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume,
			   NULL)
};

static const struct i2c_device_id wm2200_i2c_id[] = {
	{ "wm2200", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm2200_i2c_id);

static struct i2c_driver wm2200_i2c_driver = {
	.driver = {
		.name = "wm2200",
		.owner = THIS_MODULE,
		.pm = &wm2200_pm,
	},
	.probe = wm2200_i2c_probe,
	.remove = wm2200_i2c_remove,
	.id_table = wm2200_i2c_id,
};

module_i2c_driver(wm2200_i2c_driver);

MODULE_DESCRIPTION("ASoC WM2200 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
gpl-2.0
drod2169/Linux-3.11.x
arch/um/os-Linux/start_up.c
2809
12658
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <sched.h> #include <signal.h> #include <string.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/wait.h> #include <sys/time.h> #include <sys/resource.h> #include <asm/unistd.h> #include <init.h> #include <os.h> #include <mem_user.h> #include <ptrace_user.h> #include <registers.h> #include <skas.h> #include <skas_ptrace.h> static void ptrace_child(void) { int ret; /* Calling os_getpid because some libcs cached getpid incorrectly */ int pid = os_getpid(), ppid = getppid(); int sc_result; if (change_sig(SIGWINCH, 0) < 0 || ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) { perror("ptrace"); kill(pid, SIGKILL); } kill(pid, SIGSTOP); /* * This syscall will be intercepted by the parent. Don't call more than * once, please. */ sc_result = os_getpid(); if (sc_result == pid) /* Nothing modified by the parent, we are running normally. */ ret = 1; else if (sc_result == ppid) /* * Expected in check_ptrace and check_sysemu when they succeed * in modifying the stack frame */ ret = 0; else /* Serious trouble! This could be caused by a bug in host 2.6 * SKAS3/2.6 patch before release -V6, together with a bug in * the UML code itself. */ ret = 2; exit(ret); } static void fatal_perror(const char *str) { perror(str); exit(1); } static void fatal(char *fmt, ...) { va_list list; va_start(list, fmt); vfprintf(stderr, fmt, list); va_end(list); exit(1); } static void non_fatal(char *fmt, ...) 
{ va_list list; va_start(list, fmt); vfprintf(stderr, fmt, list); va_end(list); } static int start_ptraced_child(void) { int pid, n, status; pid = fork(); if (pid == 0) ptrace_child(); else if (pid < 0) fatal_perror("start_ptraced_child : fork failed"); CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_ptrace : waitpid failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) fatal("check_ptrace : expected SIGSTOP, got status = %d", status); return pid; } /* When testing for SYSEMU support, if it is one of the broken versions, we * must just avoid using sysemu, not panic, but only if SYSEMU features are * broken. * So only for SYSEMU features we test mustpanic, while normal host features * must work anyway! */ static int stop_ptraced_child(int pid, int exitcode, int mustexit) { int status, n, ret = 0; if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) { perror("stop_ptraced_child : ptrace failed"); return -1; } CATCH_EINTR(n = waitpid(pid, &status, 0)); if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { int exit_with = WEXITSTATUS(status); if (exit_with == 2) non_fatal("check_ptrace : child exited with status 2. " "\nDisabling SYSEMU support.\n"); non_fatal("check_ptrace : child exited with exitcode %d, while " "expecting %d; status 0x%x\n", exit_with, exitcode, status); if (mustexit) exit(1); ret = -1; } return ret; } /* Changed only during early boot */ int ptrace_faultinfo; static int disable_ptrace_faultinfo; int ptrace_ldt; static int disable_ptrace_ldt; int proc_mm; static int disable_proc_mm; int have_switch_mm; static int disable_switch_mm; int skas_needs_stub; static int __init skas0_cmd_param(char *str, int* add) { disable_ptrace_faultinfo = 1; disable_ptrace_ldt = 1; disable_proc_mm = 1; disable_switch_mm = 1; return 0; } /* The two __uml_setup would conflict, without this stupid alias. 
*/
static int __init mode_skas0_cmd_param(char *str, int* add)
	__attribute__((alias("skas0_cmd_param")));

__uml_setup("skas0", skas0_cmd_param,
"skas0\n"
" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used\n\n");

__uml_setup("mode=skas0", mode_skas0_cmd_param,
"mode=skas0\n"
" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used.\n\n");

/* Changed only during early boot */
static int force_sysemu_disabled = 0;

/* "nosysemu" command line flag: force-disable SYSEMU even if present. */
static int __init nosysemu_cmd_param(char *str, int* add)
{
	force_sysemu_disabled = 1;
	return 0;
}

__uml_setup("nosysemu", nosysemu_cmd_param,
"nosysemu\n"
" Turns off syscall emulation patch for ptrace (SYSEMU) on.\n"
" SYSEMU is a performance-patch introduced by Laurent Vivier. It changes\n"
" behaviour of ptrace() and helps reducing host context switch rate.\n"
" To make it working, you need a kernel patch for your host, too.\n"
" See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n"
" information.\n\n");

/*
 * Probe the host for the SYSEMU ptrace extensions using a throwaway traced
 * child.  Sets sysemu_supported to 0 (none), 1 (PTRACE_SYSEMU) or
 * 2 (PTRACE_SYSEMU_SINGLESTEP as well) and, unless "nosysemu" was given,
 * enables the best mode found via set_using_sysemu().
 */
static void __init check_sysemu(void)
{
	unsigned long regs[MAX_REG_NR];
	int pid, n, status, count=0;

	non_fatal("Checking syscall emulation patch for ptrace...");
	sysemu_supported = 0;
	pid = start_ptraced_child();

	if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
		goto fail;

	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
	if (n < 0)
		fatal_perror("check_sysemu : wait failed");

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
		fatal("check_sysemu : expected SIGTRAP, got status = %d\n",
		      status);

	/* The child must be stopped on entry to its getpid() syscall. */
	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		fatal_perror("check_sysemu : PTRACE_GETREGS failed");
	if (PT_SYSCALL_NR(regs) != __NR_getpid) {
		non_fatal("check_sysemu got system call number %d, "
			  "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid);
		goto fail;
	}

	/* Fake the syscall's return value so the child sees its own pid. */
	n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, os_getpid());
	if (n < 0) {
		non_fatal("check_sysemu : failed to modify system call "
			  "return");
		goto fail;
	}

	if (stop_ptraced_child(pid, 0, 0) < 0)
		goto fail_stopped;

	sysemu_supported = 1;
	non_fatal("OK\n");
	set_using_sysemu(!force_sysemu_disabled);

	non_fatal("Checking advanced syscall emulation patch for ptrace...");
	pid = start_ptraced_child();

	if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
		    (void *) PTRACE_O_TRACESYSGOOD) < 0))
		fatal_perror("check_sysemu: PTRACE_OLDSETOPTIONS failed");

	while (1) {
		count++;
		if (ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
			goto fail;
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
		if (n < 0)
			fatal_perror("check_sysemu: wait failed");

		/* SIGTRAP|0x80 marks a syscall stop (TRACESYSGOOD). */
		if (WIFSTOPPED(status) &&
		    (WSTOPSIG(status) == (SIGTRAP|0x80))) {
			/*
			 * NOTE(review): 'count' is incremented at the top of
			 * every iteration, so this !count branch is
			 * unreachable - the "doesn't singlestep" diagnostic
			 * is dead code as written.
			 */
			if (!count) {
				non_fatal("check_sysemu: SYSEMU_SINGLESTEP "
					  "doesn't singlestep");
				goto fail;
			}
			n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET,
				   os_getpid());
			if (n < 0)
				fatal_perror("check_sysemu : failed to modify "
					     "system call return");
			break;
		}
		else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP))
			/* Plain SIGTRAP: a singlestep stop; keep going. */
			count++;
		else {
			non_fatal("check_sysemu: expected SIGTRAP or "
				  "(SIGTRAP | 0x80), got status = %d\n",
				  status);
			goto fail;
		}
	}
	if (stop_ptraced_child(pid, 0, 0) < 0)
		goto fail_stopped;

	sysemu_supported = 2;
	non_fatal("OK\n");

	if (!force_sysemu_disabled)
		set_using_sysemu(sysemu_supported);
	return;

fail:
	stop_ptraced_child(pid, 1, 0);
fail_stopped:
	non_fatal("missing\n");
}

/*
 * Verify that the host's ptrace can rewrite a stopped child's syscall
 * number (getpid -> getppid); this capability is mandatory.  On success the
 * child exits 0 (see ptrace_child()) and we chain into check_sysemu().
 */
static void __init check_ptrace(void)
{
	int pid, syscall, n, status;

	non_fatal("Checking that ptrace can change system call numbers...");
	pid = start_ptraced_child();

	if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
		    (void *) PTRACE_O_TRACESYSGOOD) < 0))
		fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed");

	while (1) {
		if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
			fatal_perror("check_ptrace : ptrace failed");

		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
		if (n < 0)
			fatal_perror("check_ptrace : wait failed");

		if (!WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != (SIGTRAP | 0x80)))
			fatal("check_ptrace : expected (SIGTRAP|0x80), "
			      "got status = %d", status);

		/* Loop until the child reaches its getpid() syscall. */
		syscall = ptrace(PTRACE_PEEKUSER, pid, PT_SYSCALL_NR_OFFSET,
				 0);
		if (syscall == __NR_getpid) {
			n =
			    ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
				   __NR_getppid);
			if (n < 0)
				fatal_perror("check_ptrace : failed to modify "
					     "system call");
			break;
		}
	}
	stop_ptraced_child(pid, 0, 1);

	non_fatal("OK\n");
	check_sysemu();
}

extern void check_tmpexec(void);

/* Report the host's core dump rlimits; purely informational. */
static void __init check_coredump_limit(void)
{
	struct rlimit lim;
	int err = getrlimit(RLIMIT_CORE, &lim);

	if (err) {
		perror("Getting core dump limit");
		return;
	}

	printf("Core dump limits :\n\tsoft - ");
	if (lim.rlim_cur == RLIM_INFINITY)
		printf("NONE\n");
	else printf("%lu\n", lim.rlim_cur);

	printf("\thard - ");
	if (lim.rlim_max == RLIM_INFINITY)
		printf("NONE\n");
	else printf("%lu\n", lim.rlim_max);
}

/*
 * Host sanity checks run before the UML kernel starts: core dump limits,
 * mandatory ptrace capabilities, exec-ability of the tmp dir (mmaps happen
 * before the kernel runs), and capture of the default register set from a
 * throwaway traced child.
 */
void __init os_early_checks(void)
{
	int pid;

	/* Print out the core dump limits early */
	check_coredump_limit();

	check_ptrace();

	/* Need to check this early because mmapping happens before the
	 * kernel is running.
	 */
	check_tmpexec();

	pid = start_ptraced_child();
	if (init_registers(pid))
		fatal("Failed to initialize default registers");
	stop_ptraced_child(pid, 1, 1);
}

/* "noprocmm": opt out of /proc/mm even when the host offers it. */
static int __init noprocmm_cmd_param(char *str, int* add)
{
	disable_proc_mm = 1;
	return 0;
}

__uml_setup("noprocmm", noprocmm_cmd_param,
"noprocmm\n"
" Turns off usage of /proc/mm, even if host supports it.\n"
" To support /proc/mm, the host needs to be patched using\n"
" the current skas3 patch.\n\n");

/* "noptracefaultinfo": opt out of PTRACE_FAULTINFO. */
static int __init noptracefaultinfo_cmd_param(char *str, int* add)
{
	disable_ptrace_faultinfo = 1;
	return 0;
}

__uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
"noptracefaultinfo\n"
" Turns off usage of PTRACE_FAULTINFO, even if host supports\n"
" it. To support PTRACE_FAULTINFO, the host needs to be patched\n"
" using the current skas3 patch.\n\n");

/* "noptraceldt": opt out of PTRACE_LDT. */
static int __init noptraceldt_cmd_param(char *str, int* add)
{
	disable_ptrace_ldt = 1;
	return 0;
}

__uml_setup("noptraceldt", noptraceldt_cmd_param,
"noptraceldt\n"
" Turns off usage of PTRACE_LDT, even if host supports it.\n"
" To support PTRACE_LDT, the host needs to be patched using\n"
" the current skas3 patch.\n\n");

/*
 * Probe for the skas3 PTRACE_FAULTINFO extension on a throwaway child.
 * EIO from ptrace() means the host lacks the patch; any other errno is
 * reported but likewise treated as absent.
 */
static inline void check_skas3_ptrace_faultinfo(void)
{
	struct ptrace_faultinfo fi;
	int pid, n;

	non_fatal("  - PTRACE_FAULTINFO...");
	pid = start_ptraced_child();

	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
	if (n < 0) {
		if (errno == EIO)
			non_fatal("not found\n");
		else
			perror("not found");
	} else if (disable_ptrace_faultinfo)
		non_fatal("found but disabled on command line\n");
	else {
		ptrace_faultinfo = 1;
		non_fatal("found\n");
	}

	stop_ptraced_child(pid, 1, 1);
}

/*
 * Probe for the skas3 PTRACE_LDT extension (reading the default LDT);
 * compiled out entirely when the host headers lack PTRACE_LDT.
 */
static inline void check_skas3_ptrace_ldt(void)
{
#ifdef PTRACE_LDT
	int pid, n;
	unsigned char ldtbuf[40];
	struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
		.func = 2, /* read default ldt */
		.ptr = ldtbuf,
		.bytecount = sizeof(ldtbuf)};

	non_fatal("  - PTRACE_LDT...");
	pid = start_ptraced_child();

	n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
	if (n < 0) {
		if (errno == EIO)
			non_fatal("not found\n");
		else
			perror("not found");
	} else if (disable_ptrace_ldt)
		non_fatal("found, but use is disabled\n");
	else {
		ptrace_ldt = 1;
		non_fatal("found\n");
	}

	stop_ptraced_child(pid, 1, 1);
#endif
}

/* Probe for the skas3 /proc/mm interface (writability is the test). */
static inline void check_skas3_proc_mm(void)
{
	non_fatal("  - /proc/mm...");
	if (access("/proc/mm", W_OK) < 0)
		perror("not found");
	else if (disable_proc_mm)
		non_fatal("found but disabled on command line\n");
	else {
		proc_mm = 1;
		non_fatal("found\n");
	}
}

/*
 * Run all skas3 feature probes; if any feature is missing or disabled the
 * SKAS0 stub mechanism is required (skas_needs_stub).
 */
void can_do_skas(void)
{
	non_fatal("Checking for the skas3 patch in the host:\n");

	check_skas3_proc_mm();
	check_skas3_ptrace_faultinfo();
	check_skas3_ptrace_ldt();

	if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
		skas_needs_stub = 1;
}

int __init
parse_iomem(char *str, int *add)
{
	struct iomem_region *new;
	struct stat64 buf;
	char *file, *driver;
	int fd, size;

	/*
	 * 'str' has the form "<driver>,<file>"; split in place so 'driver'
	 * keeps pointing into the (long-lived) command line string.
	 */
	driver = str;
	file = strchr(str,',');
	if (file == NULL) {
		fprintf(stderr, "parse_iomem : failed to parse iomem\n");
		goto out;
	}
	*file = '\0';
	file++;
	fd = open(file, O_RDWR, 0);
	if (fd < 0) {
		perror("parse_iomem - Couldn't open io file");
		goto out;
	}

	if (fstat64(fd, &buf) < 0) {
		perror("parse_iomem - cannot stat_fd file");
		goto out_close;
	}

	new = malloc(sizeof(*new));
	if (new == NULL) {
		perror("Couldn't allocate iomem_region struct");
		goto out_close;
	}

	/*
	 * Round the file size up to a page multiple.
	 * NOTE(review): 'size' is an int, so files >= 2GB would overflow -
	 * presumably iomem backing files are small; verify against callers.
	 * NOTE(review): adding a full UM_KERN_PAGE_SIZE (rather than
	 * PAGE_SIZE - 1) allocates one extra page when st_size is already
	 * page-aligned - looks intentional (iomem_size below also pads by a
	 * page), but worth confirming.
	 */
	size = (buf.st_size + UM_KERN_PAGE_SIZE) & ~(UM_KERN_PAGE_SIZE - 1);

	/* Prepend the new region to the global iomem_regions list. */
	*new = ((struct iomem_region) { .next		= iomem_regions,
					.driver		= driver,
					.fd		= fd,
					.size		= size,
					.phys		= 0,
					.virt		= 0 });
	iomem_regions = new;
	iomem_size += new->size + UM_KERN_PAGE_SIZE;

	return 0;
 out_close:
	close(fd);
 out:
	return 1;
}
gpl-2.0
UniqueDroid/lge-kernel-x3-p880
fs/reiserfs/stree.c
3321
65835
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 *  Written by Anatoly P. Pinchuk pap@namesys.botik.ru
 *  Programm System Institute
 *  Pereslavl-Zalessky Russia
 */

/*
 *  This file contains functions dealing with S+tree
 *
 * B_IS_IN_TREE
 * copy_item_head
 * comp_short_keys
 * comp_keys
 * comp_short_le_keys
 * le_key2cpu_key
 * comp_le_keys
 * bin_search
 * get_lkey
 * get_rkey
 * key_in_buffer
 * decrement_bcount
 * reiserfs_check_path
 * pathrelse_and_restore
 * pathrelse
 * search_by_key_reada
 * search_by_key
 * search_for_position_by_key
 * comp_items
 * prepare_for_direct_item
 * prepare_for_direntry_item
 * prepare_for_delete_or_cut
 * calc_deleted_bytes_number
 * init_tb_struct
 * padd_item
 * reiserfs_delete_item
 * reiserfs_delete_solid_item
 * reiserfs_delete_object
 * maybe_indirect_to_direct
 * indirect_to_direct_roll_back
 * reiserfs_cut_from_item
 * truncate_directory
 * reiserfs_do_truncate
 * reiserfs_paste_into_item
 * reiserfs_insert_item
 */

#include <linux/time.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

/* Does the buffer contain a disk block which is in the tree. */
inline int B_IS_IN_TREE(const struct buffer_head *bh)
{

	RFALSE(B_LEVEL(bh) > MAX_HEIGHT,
	       "PAP-1010: block (%b) has too big level (%z)", bh, bh);

	/* FREE_LEVEL marks a buffer that has been cut out of the tree. */
	return (B_LEVEL(bh) != FREE_LEVEL);
}

//
// to gets item head in le form
//
/* Copy the (little-endian, on-disk format) item head 'from' into 'to'. */
inline void copy_item_head(struct item_head *to,
			   const struct item_head *from)
{
	memcpy(to, from, IH_SIZE);
}

/* k1 is pointer to on-disk structure which is stored in little-endian
   form. k2 is pointer to cpu variable. For key of items of the same
   object this returns 0.
   Returns: -1 if key1 < key2
   0 if key1 == key2
   1 if key1 > key2 */
inline int comp_short_keys(const struct reiserfs_key *le_key,
			   const struct cpu_key *cpu_key)
{
	__u32 n;
	/* Compare only dir_id and objectid (the "short" key prefix). */
	n = le32_to_cpu(le_key->k_dir_id);
	if (n < cpu_key->on_disk_key.k_dir_id)
		return -1;
	if (n > cpu_key->on_disk_key.k_dir_id)
		return 1;
	n = le32_to_cpu(le_key->k_objectid);
	if (n < cpu_key->on_disk_key.k_objectid)
		return -1;
	if (n > cpu_key->on_disk_key.k_objectid)
		return 1;
	return 0;
}

/* k1 is pointer to on-disk structure which is stored in little-endian
   form. k2 is pointer to cpu variable.
   Compare keys using all 4 key fields.
   Returns: -1 if key1 < key2 0
   if key1 = key2 1 if key1 > key2 */
static inline int comp_keys(const struct reiserfs_key *le_key,
			    const struct cpu_key *cpu_key)
{
	int retval;

	retval = comp_short_keys(le_key, cpu_key);
	if (retval)
		return retval;
	if (le_key_k_offset(le_key_version(le_key), le_key) <
	    cpu_key_k_offset(cpu_key))
		return -1;
	if (le_key_k_offset(le_key_version(le_key), le_key) >
	    cpu_key_k_offset(cpu_key))
		return 1;

	/* A 3-field cpu key deliberately ignores the type field. */
	if (cpu_key->key_length == 3)
		return 0;

	/* this part is needed only when tail conversion is in progress */
	if (le_key_k_type(le_key_version(le_key), le_key) <
	    cpu_key_k_type(cpu_key))
		return -1;

	if (le_key_k_type(le_key_version(le_key), le_key) >
	    cpu_key_k_type(cpu_key))
		return 1;

	return 0;
}

/* Three-way compare of the short (dir_id, objectid) prefix of two on-disk
   keys, word by word in little-endian order. */
inline int comp_short_le_keys(const struct reiserfs_key *key1,
			      const struct reiserfs_key *key2)
{
	__u32 *k1_u32, *k2_u32;
	int key_length = REISERFS_SHORT_KEY_LEN;

	k1_u32 = (__u32 *) key1;
	k2_u32 = (__u32 *) key2;
	for (; key_length--; ++k1_u32, ++k2_u32) {
		if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32))
			return -1;
		if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32))
			return 1;
	}
	return 0;
}

/* Convert an on-disk (little-endian) key into cpu_key form, decoding the
   key format version to unpack offset and type. */
inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
{
	int version;
	to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
	to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);

	// find out version of the key
	version = le_key_version(from);
	to->version = version;
	to->on_disk_key.k_offset = le_key_k_offset(version, from);
	to->on_disk_key.k_type = le_key_k_type(version, from);
}

// this does not say which one is bigger, it only returns 1 if keys
// are not equal, 0 otherwise
inline int comp_le_keys(const struct reiserfs_key *k1,
			const struct reiserfs_key *k2)
{
	return memcmp(k1, k2, sizeof(struct reiserfs_key));
}

/**************************************************************************
 *  Binary search toolkit function                                        *
 *  Search for an item in the array by the item key                       *
 *  Returns:    1 if found,  0 if not found;                              *
 *        *pos = number of the searched element if found, else the        *
 *        number of the first element that is larger than key.            *
 **************************************************************************/
/* For those not familiar with binary search: lbound is the leftmost item that it
 could be, rbound the rightmost item that it could be.  We examine the item
 halfway between lbound and rbound, and that tells us either that we can increase
 lbound, or decrease rbound, or that we have found it, or if lbound <= rbound that
 there are no possible items, and we have not found it. With each examination we
 cut the number of possible items it could be by one more than half rounded down,
 or we find it. */
static inline int bin_search(const void *key,	/* Key to search for. */
			     const void *base,	/* First item in the array. */
			     int num,	/* Number of items in the array. */
			     int width,	/* Item size in the array.
					   searched. Lest the reader be
					   confused, note that this is crafted
					   as a general function, and when it
					   is applied specifically to the array
					   of item headers in a node, width
					   is actually the item header size not
					   the item size. */
			     int *pos	/* Number of the searched for element.
					 */
    )
{
	int rbound, lbound, j;

	for (j = ((rbound = num - 1) + (lbound = 0)) / 2;
	     lbound <= rbound; j = (rbound + lbound) / 2)
		switch (comp_keys
			((struct reiserfs_key *)((char *)base + j * width),
			 (struct cpu_key *)key)) {
		case -1:
			lbound = j + 1;
			continue;
		case 1:
			rbound = j - 1;
			continue;
		case 0:
			*pos = j;
			return ITEM_FOUND;	/* Key found in the array.  */
		}

	/* bin_search did not find given key, it returns position of key,
	   that is minimal and greater than the given one. */
	*pos = lbound;
	return ITEM_NOT_FOUND;
}


/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };

/* Maximal possible key. It is never in the tree. */
static const struct reiserfs_key MAX_KEY = {
	__constant_cpu_to_le32(0xffffffff),
	__constant_cpu_to_le32(0xffffffff),
	{{__constant_cpu_to_le32(0xffffffff),
	  __constant_cpu_to_le32(0xffffffff)},}
};

/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
   of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
   the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
   case we return a special key, either MIN_KEY or MAX_KEY. */
static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
						  const struct super_block *sb)
{
	int position, path_offset = chk_path->path_length;
	struct buffer_head *parent;

	RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5010: invalid offset in the path");

	/* While not higher in path than first element. */
	while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {

		RFALSE(!buffer_uptodate
		       (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
		       "PAP-5020: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		if (!B_IS_IN_TREE
		    (parent =
		     PATH_OFFSET_PBUFFER(chk_path, path_offset)))
			return &MAX_KEY;
		/* Check whether position in the parent is correct.
*/ if ((position = PATH_OFFSET_POSITION(chk_path, path_offset)) > B_NR_ITEMS(parent)) return &MAX_KEY; /* Check whether parent at the path really points to the child. */ if (B_N_CHILD_NUM(parent, position) != PATH_OFFSET_PBUFFER(chk_path, path_offset + 1)->b_blocknr) return &MAX_KEY; /* Return delimiting key if position in the parent is not equal to zero. */ if (position) return B_N_PDELIM_KEY(parent, position - 1); } /* Return MIN_KEY if we are in the root of the buffer tree. */ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(sb)) return &MIN_KEY; return &MAX_KEY; } /* Get delimiting key of the buffer at the path and its right neighbor. */ inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path, const struct super_block *sb) { int position, path_offset = chk_path->path_length; struct buffer_head *parent; RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET, "PAP-5030: invalid offset in the path"); while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) { RFALSE(!buffer_uptodate (PATH_OFFSET_PBUFFER(chk_path, path_offset)), "PAP-5040: parent is not uptodate"); /* Parent at the path is not in the tree now. */ if (!B_IS_IN_TREE (parent = PATH_OFFSET_PBUFFER(chk_path, path_offset))) return &MIN_KEY; /* Check whether position in the parent is correct. */ if ((position = PATH_OFFSET_POSITION(chk_path, path_offset)) > B_NR_ITEMS(parent)) return &MIN_KEY; /* Check whether parent at the path really points to the child. */ if (B_N_CHILD_NUM(parent, position) != PATH_OFFSET_PBUFFER(chk_path, path_offset + 1)->b_blocknr) return &MIN_KEY; /* Return delimiting key if position in the parent is not the last one. */ if (position != B_NR_ITEMS(parent)) return B_N_PDELIM_KEY(parent, position); } /* Return MAX_KEY if we are in the root of the buffer tree. 
*/ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(sb)) return &MAX_KEY; return &MIN_KEY; } /* Check whether a key is contained in the tree rooted from a buffer at a path. */ /* This works by looking at the left and right delimiting keys for the buffer in the last path_element in the path. These delimiting keys are stored at least one level above that buffer in the tree. If the buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */ static inline int key_in_buffer(struct treepath *chk_path, /* Path which should be checked. */ const struct cpu_key *key, /* Key which should be checked. */ struct super_block *sb ) { RFALSE(!key || chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET || chk_path->path_length > MAX_HEIGHT, "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)", key, chk_path->path_length); RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev, "PAP-5060: device must not be NODEV"); if (comp_keys(get_lkey(chk_path, sb), key) == 1) /* left delimiting key is bigger, that the key we look for */ return 0; /* if ( comp_keys(key, get_rkey(chk_path, sb)) != -1 ) */ if (comp_keys(get_rkey(chk_path, sb), key) != 1) /* key must be less than right delimitiing key */ return 0; return 1; } int reiserfs_check_path(struct treepath *p) { RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET, "path not properly relsed"); return 0; } /* Drop the reference to each buffer in a path and restore * dirty bits clean when preparing the buffer for the log. 
* This version should only be called from fix_nodes() */ void pathrelse_and_restore(struct super_block *sb, struct treepath *search_path) { int path_offset = search_path->path_length; RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, "clm-4000: invalid path offset"); while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) { struct buffer_head *bh; bh = PATH_OFFSET_PBUFFER(search_path, path_offset--); reiserfs_restore_prepared_buffer(sb, bh); brelse(bh); } search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } /* Drop the reference to each buffer in a path */ void pathrelse(struct treepath *search_path) { int path_offset = search_path->path_length; RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET, "PAP-5090: invalid path offset"); while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) brelse(PATH_OFFSET_PBUFFER(search_path, path_offset--)); search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) { struct block_head *blkh; struct item_head *ih; int used_space; int prev_location; int i; int nr; blkh = (struct block_head *)buf; if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) { reiserfs_warning(NULL, "reiserfs-5080", "this should be caught earlier"); return 0; } nr = blkh_nr_item(blkh); if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) { /* item number is too big or too small */ reiserfs_warning(NULL, "reiserfs-5081", "nr_item seems wrong: %z", bh); return 0; } ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1; used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih)); if (used_space != blocksize - blkh_free_space(blkh)) { /* free space does not match to calculated amount of use space */ reiserfs_warning(NULL, "reiserfs-5082", "free space seems wrong: %z", bh); return 0; } // FIXME: it is_leaf will hit performance too much - we may have // return 1 here /* check tables of item heads */ ih = (struct item_head *)(buf + BLKH_SIZE); prev_location = blocksize; for (i = 0; i < 
nr; i++, ih++) { if (le_ih_k_type(ih) == TYPE_ANY) { reiserfs_warning(NULL, "reiserfs-5083", "wrong item type for item %h", ih); return 0; } if (ih_location(ih) >= blocksize || ih_location(ih) < IH_SIZE * nr) { reiserfs_warning(NULL, "reiserfs-5084", "item location seems wrong: %h", ih); return 0; } if (ih_item_len(ih) < 1 || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) { reiserfs_warning(NULL, "reiserfs-5085", "item length seems wrong: %h", ih); return 0; } if (prev_location - ih_location(ih) != ih_item_len(ih)) { reiserfs_warning(NULL, "reiserfs-5086", "item location seems wrong " "(second one): %h", ih); return 0; } prev_location = ih_location(ih); } // one may imagine much more checks return 1; } /* returns 1 if buf looks like an internal node, 0 otherwise */ static int is_internal(char *buf, int blocksize, struct buffer_head *bh) { struct block_head *blkh; int nr; int used_space; blkh = (struct block_head *)buf; nr = blkh_level(blkh); if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) { /* this level is not possible for internal nodes */ reiserfs_warning(NULL, "reiserfs-5087", "this should be caught earlier"); return 0; } nr = blkh_nr_item(blkh); if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) { /* for internal which is not root we might check min number of keys */ reiserfs_warning(NULL, "reiserfs-5088", "number of key seems wrong: %z", bh); return 0; } used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1); if (used_space != blocksize - blkh_free_space(blkh)) { reiserfs_warning(NULL, "reiserfs-5089", "free space seems wrong: %z", bh); return 0; } // one may imagine much more checks return 1; } // make sure that bh contains formatted node of reiserfs tree of // 'level'-th level static int is_tree_node(struct buffer_head *bh, int level) { if (B_LEVEL(bh) != level) { reiserfs_warning(NULL, "reiserfs-5090", "node level %d does " "not match to the expected one %d", B_LEVEL(bh), level); return 0; } if (level == DISK_LEAF_NODE_LEVEL) return 
is_leaf(bh->b_data, bh->b_size, bh); return is_internal(bh->b_data, bh->b_size, bh); } #define SEARCH_BY_KEY_READA 16 /* * The function is NOT SCHEDULE-SAFE! * It might unlock the write lock if we needed to wait for a block * to be read. Note that in this case it won't recover the lock to avoid * high contention resulting from too much lock requests, especially * the caller (search_by_key) will perform other schedule-unsafe * operations just after calling this function. * * @return true if we have unlocked */ static bool search_by_key_reada(struct super_block *s, struct buffer_head **bh, b_blocknr_t *b, int num) { int i, j; bool unlocked = false; for (i = 0; i < num; i++) { bh[i] = sb_getblk(s, b[i]); } /* * We are going to read some blocks on which we * have a reference. It's safe, though we might be * reading blocks concurrently changed if we release * the lock. But it's still fine because we check later * if the tree changed */ for (j = 0; j < i; j++) { /* * note, this needs attention if we are getting rid of the BKL * you have to make sure the prepared bit isn't set on this buffer */ if (!buffer_uptodate(bh[j])) { if (!unlocked) { reiserfs_write_unlock(s); unlocked = true; } ll_rw_block(READA, 1, bh + j); } brelse(bh[j]); } return unlocked; } /************************************************************************** * Algorithm SearchByKey * * look for item in the Disk S+Tree by its key * * Input: sb - super block * * key - pointer to the key to search * * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR * * search_path - path from the root to the needed leaf * **************************************************************************/ /* This function fills up the path from the root to the leaf as it descends the tree looking for the key. It uses reiserfs_bread to try to find buffers in the cache given their block number. If it does not find them in the cache it reads them from disk. 
For each node search_by_key finds using reiserfs_bread it then uses bin_search to look through that node. bin_search will find the position of the block_number of the next node if it is looking through an internal node. If it is looking through a leaf node bin_search will find the position of the item which has key either equal to given key, or which is the maximal key less than the given key. search_by_key returns a path that must be checked for the correctness of the top of the path but need not be checked for the correctness of the bottom of the path */ /* The function is NOT SCHEDULE-SAFE! */ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to search. */ struct treepath *search_path,/* This structure was allocated and initialized by the calling function. It is filled up by this function. */ int stop_level /* How far down the tree to search. To stop at leaf level - set to DISK_LEAF_NODE_LEVEL */ ) { b_blocknr_t block_number; int expected_level; struct buffer_head *bh; struct path_element *last_element; int node_level, retval; int right_neighbor_of_leaf_node; int fs_gen; struct buffer_head *reada_bh[SEARCH_BY_KEY_READA]; b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA]; int reada_count = 0; #ifdef CONFIG_REISERFS_CHECK int repeat_counter = 0; #endif PROC_INFO_INC(sb, search_by_key); /* As we add each node to a path we increase its count. This means that we must be careful to release all nodes in a path before we either discard the path struct or re-use the path struct, as we do here. */ pathrelse(search_path); right_neighbor_of_leaf_node = 0; /* With each iteration of this loop we search through the items in the current node, and calculate the next current node(next path element) for the next iteration of this loop.. 
*/ block_number = SB_ROOT_BLOCK(sb); expected_level = -1; while (1) { #ifdef CONFIG_REISERFS_CHECK if (!(++repeat_counter % 50000)) reiserfs_warning(sb, "PAP-5100", "%s: there were %d iterations of " "while loop looking for key %K", current->comm, repeat_counter, key); #endif /* prep path to have another element added to it. */ last_element = PATH_OFFSET_PELEMENT(search_path, ++search_path->path_length); fs_gen = get_generation(sb); /* Read the next tree node, and set the last element in the path to have a pointer to it. */ if ((bh = last_element->pe_buffer = sb_getblk(sb, block_number))) { bool unlocked = false; if (!buffer_uptodate(bh) && reada_count > 1) /* may unlock the write lock */ unlocked = search_by_key_reada(sb, reada_bh, reada_blocks, reada_count); /* * If we haven't already unlocked the write lock, * then we need to do that here before reading * the current block */ if (!buffer_uptodate(bh) && !unlocked) { reiserfs_write_unlock(sb); unlocked = true; } ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); if (unlocked) reiserfs_write_lock(sb); if (!buffer_uptodate(bh)) goto io_error; } else { io_error: search_path->path_length--; pathrelse(search_path); return IO_ERROR; } reada_count = 0; if (expected_level == -1) expected_level = SB_TREE_HEIGHT(sb); expected_level--; /* It is possible that schedule occurred. We must check whether the key to search is still in the tree rooted from the current buffer. If not then repeat search from the root. */ if (fs_changed(fs_gen, sb) && (!B_IS_IN_TREE(bh) || B_LEVEL(bh) != expected_level || !key_in_buffer(search_path, key, sb))) { PROC_INFO_INC(sb, search_by_key_fs_changed); PROC_INFO_INC(sb, search_by_key_restarted); PROC_INFO_INC(sb, sbk_restarted[expected_level - 1]); pathrelse(search_path); /* Get the root block number so that we can repeat the search starting from the root. 
*/ block_number = SB_ROOT_BLOCK(sb); expected_level = -1; right_neighbor_of_leaf_node = 0; /* repeat search from the root */ continue; } /* only check that the key is in the buffer if key is not equal to the MAX_KEY. Latter case is only possible in "finish_unfinished()" processing during mount. */ RFALSE(comp_keys(&MAX_KEY, key) && !key_in_buffer(search_path, key, sb), "PAP-5130: key is not in the buffer"); #ifdef CONFIG_REISERFS_CHECK if (REISERFS_SB(sb)->cur_tb) { print_cur_tb("5140"); reiserfs_panic(sb, "PAP-5140", "schedule occurred in do_balance!"); } #endif // make sure, that the node contents look like a node of // certain level if (!is_tree_node(bh, expected_level)) { reiserfs_error(sb, "vs-5150", "invalid format found in block %ld. " "Fsck?", bh->b_blocknr); pathrelse(search_path); return IO_ERROR; } /* ok, we have acquired next formatted node in the tree */ node_level = B_LEVEL(bh); PROC_INFO_BH_STAT(sb, bh, node_level - 1); RFALSE(node_level < stop_level, "vs-5152: tree level (%d) is less than stop level (%d)", node_level, stop_level); retval = bin_search(key, B_N_PITEM_HEAD(bh, 0), B_NR_ITEMS(bh), (node_level == DISK_LEAF_NODE_LEVEL) ? IH_SIZE : KEY_SIZE, &(last_element->pe_position)); if (node_level == stop_level) { return retval; } /* we are not in the stop level */ if (retval == ITEM_FOUND) /* item has been found, so we choose the pointer which is to the right of the found one */ last_element->pe_position++; /* if item was not found we choose the position which is to the left of the found item. This requires no code, bin_search did it already. */ /* So we have chosen a position in the current node which is an internal node. Now we calculate child block number by position in the node. 
*/ block_number = B_N_CHILD_NUM(bh, last_element->pe_position); /* if we are going to read leaf nodes, try for read ahead as well */ if ((search_path->reada & PATH_READA) && node_level == DISK_LEAF_NODE_LEVEL + 1) { int pos = last_element->pe_position; int limit = B_NR_ITEMS(bh); struct reiserfs_key *le_key; if (search_path->reada & PATH_READA_BACK) limit = 0; while (reada_count < SEARCH_BY_KEY_READA) { if (pos == limit) break; reada_blocks[reada_count++] = B_N_CHILD_NUM(bh, pos); if (search_path->reada & PATH_READA_BACK) pos--; else pos++; /* * check to make sure we're in the same object */ le_key = B_N_PDELIM_KEY(bh, pos); if (le32_to_cpu(le_key->k_objectid) != key->on_disk_key.k_objectid) { break; } } } } } /* Form the path to an item and position in this item which contains file byte defined by key. If there is no such item corresponding to the key, we point the path to the item with maximal key less than key, and *pos_in_item is set to one past the last entry/byte in the item. If searching for entry in a directory item, and it is not found, *pos_in_item is set to one entry more than the entry with maximal key which is less than the sought key. Note that if there is no entry in this same node which is one more, then we point to an imaginary entry. for direct items, the position is in units of bytes, for indirect items the position is in units of blocknr entries, for directory items the position is in units of directory entries. */ /* The function is NOT SCHEDULE-SAFE! */ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super block. */ const struct cpu_key *p_cpu_key, /* Key to search (cpu variable) */ struct treepath *search_path /* Filled up by this function. */ ) { struct item_head *p_le_ih; /* pointer to on-disk structure */ int blk_size; loff_t item_offset, offset; struct reiserfs_dir_entry de; int retval; /* If searching for directory entry. 
*/
	if (is_direntry_cpu_key(p_cpu_key))
		return search_by_entry_key(sb, p_cpu_key, search_path, &de);

	/* If not searching for directory entry. */

	/* If item is found. */
	retval = search_item(sb, p_cpu_key, search_path);
	if (retval == IO_ERROR)
		return retval;
	if (retval == ITEM_FOUND) {
		RFALSE(!ih_item_len
		       (B_N_PITEM_HEAD
			(PATH_PLAST_BUFFER(search_path),
			 PATH_LAST_POSITION(search_path))),
		       "PAP-5165: item length equals zero");

		pos_in_item(search_path) = 0;
		return POSITION_FOUND;
	}

	RFALSE(!PATH_LAST_POSITION(search_path),
	       "PAP-5170: position equals zero");

	/* Item is not found. Set path to the previous item. */
	p_le_ih = B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path),
				 --PATH_LAST_POSITION(search_path));
	blk_size = sb->s_blocksize;

	/* previous item belongs to a different object -> the sought file
	   byte does not exist */
	if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
		return FILE_NOT_FOUND;
	}
	// FIXME: quite ugly this far

	item_offset = le_ih_k_offset(p_le_ih);
	offset = cpu_key_k_offset(p_cpu_key);

	/* Needed byte is contained in the item pointed to by the path. */
	if (item_offset <= offset &&
	    item_offset + op_bytes_number(p_le_ih, blk_size) > offset) {
		pos_in_item(search_path) = offset - item_offset;
		/* indirect items are indexed by block pointer, not byte */
		if (is_indirect_le_ih(p_le_ih)) {
			pos_in_item(search_path) /= blk_size;
		}
		return POSITION_FOUND;
	}

	/* Needed byte is not contained in the item pointed to by the
	   path. Set pos_in_item out of the item. */
	if (is_indirect_le_ih(p_le_ih))
		pos_in_item(search_path) =
		    ih_item_len(p_le_ih) / UNFM_P_SIZE;
	else
		pos_in_item(search_path) = ih_item_len(p_le_ih);

	return POSITION_NOT_FOUND;
}

/* Compare given item and item pointed to by the path.
   Returns 0 when the stored header still matches the one on the path
   (memcmp of the headers), non-zero otherwise. */
int comp_items(const struct item_head *stored_ih, const struct treepath *path)
{
	struct buffer_head *bh = PATH_PLAST_BUFFER(path);
	struct item_head *ih;

	/* Last buffer at the path is not in the tree. */
	if (!B_IS_IN_TREE(bh))
		return 1;

	/* Last path position is invalid. */
	if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh))
		return 1;

	/* we need only to know, whether it is the same item */
	ih = get_ih(path);
	return memcmp(stored_ih, ih, IH_SIZE);
}

/* unformatted nodes are not logged anymore, ever.  This is safe
** now
*/
#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)

// block can not be forgotten as it is in I/O or held by someone
#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))

/* prepare for delete or cut of direct item: decide whether the whole
   item goes (M_DELETE) or only its tail (M_CUT); *cut_size receives a
   negative byte count for the balance code */
static inline int prepare_for_direct_item(struct treepath *path,
					  struct item_head *le_ih,
					  struct inode *inode,
					  loff_t new_file_length, int *cut_size)
{
	loff_t round_len;

	if (new_file_length == max_reiserfs_offset(inode)) {
		/* item has to be deleted */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}
	// new file gets truncated
	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
		//
		round_len = ROUND_UP(new_file_length);
		/* this was new_file_length < le_ih ... */
		if (round_len < le_ih_k_offset(le_ih)) {
			*cut_size = -(IH_SIZE + ih_item_len(le_ih));
			return M_DELETE;	/* Delete this item. */
		}
		/* Calculate first position and size for cutting from item. */
		pos_in_item(path) = round_len - (le_ih_k_offset(le_ih) - 1);
		*cut_size = -(ih_item_len(le_ih) - pos_in_item(path));

		return M_CUT;	/* Cut from this item. */
	}

	// old file: items may have any length

	if (new_file_length < le_ih_k_offset(le_ih)) {
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;	/* Delete this item. */
	}
	/* Calculate first position and size for cutting from item. */
	*cut_size = -(ih_item_len(le_ih) -
		      (pos_in_item(path) =
		       new_file_length + 1 - le_ih_k_offset(le_ih)));
	return M_CUT;	/* Cut from this item.
*/
}

/* like prepare_for_direct_item, but for a directory item: decide
   between deleting the whole item and cutting a single entry */
static inline int prepare_for_direntry_item(struct treepath *path,
					    struct item_head *le_ih,
					    struct inode *inode,
					    loff_t new_file_length,
					    int *cut_size)
{
	if (le_ih_k_offset(le_ih) == DOT_OFFSET &&
	    new_file_length == max_reiserfs_offset(inode)) {
		RFALSE(ih_entry_count(le_ih) != 2,
		       "PAP-5220: incorrect empty directory item (%h)", le_ih);
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;	/* Delete the directory item containing "." and ".." entry. */
	}

	if (ih_entry_count(le_ih) == 1) {
		/* Delete the directory item such as there is one record only
		   in this item */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}

	/* Cut one record from the directory item. */
	*cut_size =
	    -(DEH_SIZE +
	      entry_length(get_last_bh(path), le_ih, pos_in_item(path)));
	return M_CUT;
}

#define JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD (2 * JOURNAL_PER_BALANCE_CNT + 1)

/*  If the path points to a directory or direct item, calculate mode and the size cut, for balance.
    If the path points to an indirect item, remove some number of its unformatted nodes.
    In case of file truncate calculate whether this item must be deleted/truncated or last
    unformatted node of this item will be converted to a direct item.
    This function returns a determination of what balance mode the calling function should employ. */
static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th,
				      struct inode *inode,
				      struct treepath *path,
				      const struct cpu_key *item_key,
				      int *removed,	/* Number of unformatted nodes which were removed
							   from end of the file. */
				      int *cut_size, unsigned long long new_file_length	/* MAX_KEY_OFFSET in case of delete. */
    )
{
	struct super_block *sb = inode->i_sb;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
	struct buffer_head *bh = PATH_PLAST_BUFFER(path);

	BUG_ON(!th->t_trans_id);

	/* Stat_data item. */
	if (is_statdata_le_ih(p_le_ih)) {

		RFALSE(new_file_length != max_reiserfs_offset(inode),
		       "PAP-5210: mode must be M_DELETE");

		*cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
		return M_DELETE;
	}

	/* Directory item. */
	if (is_direntry_le_ih(p_le_ih))
		return prepare_for_direntry_item(path, p_le_ih, inode,
						 new_file_length,
						 cut_size);

	/* Direct item. */
	if (is_direct_le_ih(p_le_ih))
		return prepare_for_direct_item(path, p_le_ih, inode,
					       new_file_length, cut_size);

	/* Case of an indirect item. */
	{
	    int blk_size = sb->s_blocksize;
	    struct item_head s_ih;
	    int need_re_search;
	    int delete = 0;
	    int result = M_CUT;
	    int pos = 0;

	    if ( new_file_length == max_reiserfs_offset (inode) ) {
		/* prepare_for_delete_or_cut() is called by
		 * reiserfs_delete_item() */
		new_file_length = 0;
		delete = 1;
	    }

	    do {
		need_re_search = 0;
		*cut_size = 0;
		bh = PATH_PLAST_BUFFER(path);
		copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
		pos = I_UNFM_NUM(&s_ih);

		/* walk the unformatted node pointers from the item's tail,
		   freeing each block that lies beyond new_file_length */
		while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) {
		    __le32 *unfm;
		    __u32 block;

		    /* Each unformatted block deletion may involve one additional
		     * bitmap block into the transaction, thereby the initial
		     * journal space reservation might not be enough. */
		    if (!delete && (*cut_size) != 0 &&
			reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
			break;

		    unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1;
		    block = get_block_num(unfm, 0);

		    if (block != 0) {
			reiserfs_prepare_for_journal(sb, bh, 1);
			put_block_num(unfm, 0, 0);
			journal_mark_dirty(th, sb, bh);
			reiserfs_free_block(th, inode, block, 1);
		    }

		    /* give other waiters a chance; dropping the write lock
		       means the tree may change under us */
		    reiserfs_write_unlock(sb);
		    cond_resched();
		    reiserfs_write_lock(sb);

		    if (item_moved (&s_ih, path))  {
			need_re_search = 1;
			break;
		    }

		    pos --;
		    (*removed)++;
		    (*cut_size) -= UNFM_P_SIZE;

		    if (pos == 0) {
			/* all pointers gone: whole item is to be deleted */
			(*cut_size) -= IH_SIZE;
			result = M_DELETE;
			break;
		    }
		}
		/* a trick.  If the buffer has been logged, this will do nothing.
If
		** we've broken the loop without logging it, it will restore the
		** buffer */
		reiserfs_restore_prepared_buffer(sb, bh);
	    } while (need_re_search &&
		     search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
	    pos_in_item(path) = pos * UNFM_P_SIZE;

	    if (*cut_size == 0) {
		/* Nothing were cut. maybe convert last unformatted node to the
		 * direct item? */
		result = M_CONVERT;
	    }
	    return result;
	}
}

/* Calculate number of bytes which will be deleted or cut during balance */
static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
{
	int del_size;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path);

	if (is_statdata_le_ih(p_le_ih))
		return 0;

	del_size =
	    (mode ==
	     M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
	if (is_direntry_le_ih(p_le_ih)) {
		/* return EMPTY_DIR_SIZE; We delete emty directoris only.
		 * we can't use EMPTY_DIR_SIZE, as old format dirs have a different
		 * empty size.  ick. FIXME, is this right? */
		return del_size;
	}

	/* for indirect items translate pointer count into data bytes */
	if (is_indirect_le_ih(p_le_ih))
		del_size = (del_size / UNFM_P_SIZE) *
		    (PATH_PLAST_BUFFER(tb->tb_path)->b_size);
	return del_size;
}

/* initialize a tree_balance descriptor before calling fix_nodes;
   insert_size[0] carries the (signed) size of the change */
static void init_tb_struct(struct reiserfs_transaction_handle *th,
			   struct tree_balance *tb,
			   struct super_block *sb,
			   struct treepath *path, int size)
{

	BUG_ON(!th->t_trans_id);

	memset(tb, '\0', sizeof(struct tree_balance));
	tb->transaction_handle = th;
	tb->tb_sb = sb;
	tb->tb_path = path;
	PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
	PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
	tb->insert_size[0] = size;
}

/* zero-fill item[length..total_length) */
void padd_item(char *item, int total_length, int length)
{
	int i;

	for (i = total_length; i > length;)
		item[--i] = 0;
}

#ifdef REISERQUOTA_DEBUG
/* map an on-disk key to a one-character item-type tag for debug output */
char key2type(struct reiserfs_key *ih)
{
	if (is_direntry_le_key(2, ih))
		return 'd';
	if (is_direct_le_key(2, ih))
		return 'D';
	if (is_indirect_le_key(2, ih))
		return 'i';
	if (is_statdata_le_key(2, ih))
		return 's';
	return 'u';
}

/* same as key2type but keyed off an item head */
char head2type(struct item_head *ih)
{
	if (is_direntry_le_ih(ih))
		return 'd';
	if
 (is_direct_le_ih(ih))
		return 'D';
	if (is_indirect_le_ih(ih))
		return 'i';
	if (is_statdata_le_ih(ih))
		return 's';
	return 'u';
}
#endif

/* Delete object item.
 * th       - active transaction handle
 * path     - path to the deleted item
 * item_key - key to search for the deleted item
 * indode   - used for updating i_blocks and quotas
 * un_bh    - NULL or unformatted node pointer
 */
int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path, const struct cpu_key *item_key,
			 struct inode *inode, struct buffer_head *un_bh)
{
	struct super_block *sb = inode->i_sb;
	struct tree_balance s_del_balance;
	struct item_head s_ih;
	struct item_head *q_ih;
	int quota_cut_bytes;
	int ret_value, del_size, removed;

#ifdef CONFIG_REISERFS_CHECK
	char mode;
	int iter = 0;
#endif

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_del_balance, sb, path,
		       0 /*size is unknown */ );

	/* retry until fix_nodes succeeds without the tree changing
	   underneath us */
	while (1) {
		removed = 0;

#ifdef CONFIG_REISERFS_CHECK
		iter++;
		mode =
#endif
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &del_size,
					      max_reiserfs_offset(inode));

		RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE");

		copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
		s_del_balance.insert_size[0] = del_size;

		ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		PROC_INFO_INC(sb, delete_item_restarted);

		// file system changed, repeat search
		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == IO_ERROR)
			break;
		if (ret_value == FILE_NOT_FOUND) {
			reiserfs_warning(sb, "vs-5340",
					 "no items of the file %K found",
					 item_key);
			break;
		}
	}			/* while (1) */

	if (ret_value != CARRY_ON) {
		unfix_nodes(&s_del_balance);
		return 0;
	}
	// reiserfs_delete_item returns item length when success
	ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
	q_ih = get_ih(path);
	quota_cut_bytes = ih_item_len(q_ih);

	/* hack so the quota code doesn't have to guess if the file
	 ** has a tail. On tail insert, we allocate quota for 1 unformatted node.
	 ** We test the offset because the tail might have been
	 ** split into multiple items, and we only want to decrement for
	 ** the unfm node once
	 */
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
		if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}

	if (un_bh) {
		int off;
		char *data;

		/* We are in direct2indirect conversion, so move tail contents
		   to the unformatted node */
		/* note, we do the copy before preparing the buffer because we
		 ** don't care about the contents of the unformatted node yet.
		 ** the only thing we really care about is the direct item's data
		 ** is in the unformatted node.
		 **
		 ** Otherwise, we would have to call reiserfs_prepare_for_journal on
		 ** the unformatted node, which might schedule, meaning we'd have to
		 ** loop all the way back up to the start of the while loop.
		 **
		 ** The unformatted node must be dirtied later on.  We can't be
		 ** sure here if the entire tail has been deleted yet.
		 **
		 ** un_bh is from the page cache (all unformatted nodes are
		 ** from the page cache) and might be a highmem page.  So, we
		 ** can't use un_bh->b_data.
		 ** -clm
		 */

		data = kmap_atomic(un_bh->b_page, KM_USER0);
		off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
		memcpy(data + off,
		       B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
		       ret_value);
		kunmap_atomic(data, KM_USER0);
	}
	/* Perform balancing after all resources have been collected at once. */
	do_balance(&s_del_balance, NULL, NULL, M_DELETE);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "reiserquota delete_item(): freeing %u, id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
	dquot_free_space_nodirty(inode, quota_cut_bytes);

	/* Return deleted body length */
	return ret_value;
}

/* Summary Of Mechanisms For Handling Collisions Between Processes:

 deletion of the body of the object is performed by iput(), with the
 result that if multiple processes are operating on a file, the
 deletion of the body of the file is deferred until the last process
 that has an open inode performs its iput().

 writes and truncates are protected from collisions by use of
 semaphores.

 creates, linking, and mknod are protected from collisions with other
 processes by making the reiserfs_add_entry() the last step in the
 creation, and then rolling back all changes if there was a collision.
 - Hans */

/* this deletes item which never gets split */
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
				struct inode *inode, struct reiserfs_key *key)
{
	struct tree_balance tb;
	INITIALIZE_PATH(path);
	int item_len = 0;
	int tb_init = 0;
	struct cpu_key cpu_key;
	int retval;
	int quota_cut_bytes = 0;

	BUG_ON(!th->t_trans_id);

	le_key2cpu_key(&cpu_key, key);

	while (1) {
		retval = search_item(th->t_super, &cpu_key, &path);
		if (retval == IO_ERROR) {
			reiserfs_error(th->t_super, "vs-5350",
				       "i/o failure occurred trying "
				       "to delete %K", &cpu_key);
			break;
		}
		if (retval != ITEM_FOUND) {
			pathrelse(&path);
			// No need for a warning, if there is just no free space to insert '..' item into the newly-created subdir
			if (!
			    ((unsigned long long)
			     GET_HASH_VALUE(le_key_k_offset
					    (le_key_version(key), key)) == 0
			     && (unsigned long long)
			     GET_GENERATION_NUMBER(le_key_k_offset
						   (le_key_version(key),
						    key)) == 1))
				reiserfs_warning(th->t_super, "vs-5355",
						 "%k not found", key);
			break;
		}
		if (!tb_init) {
			tb_init = 1;
			item_len = ih_item_len(PATH_PITEM_HEAD(&path));
			init_tb_struct(th, &tb, th->t_super, &path,
				       -(IH_SIZE + item_len));
		}
		quota_cut_bytes = ih_item_len(PATH_PITEM_HEAD(&path));

		retval = fix_nodes(M_DELETE, &tb, NULL, NULL);
		if (retval == REPEAT_SEARCH) {
			PROC_INFO_INC(th->t_super, delete_solid_item_restarted);
			continue;
		}

		if (retval == CARRY_ON) {
			do_balance(&tb, NULL, NULL, M_DELETE);
			if (inode) {	/* Should we count quota for item? (we don't count quotas for save-links) */
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
					       "reiserquota delete_solid_item(): freeing %u id=%u type=%c",
					       quota_cut_bytes, inode->i_uid,
					       key2type(key));
#endif
				dquot_free_space_nodirty(inode,
							 quota_cut_bytes);
			}
			break;
		}
		// IO_ERROR, NO_DISK_SPACE, etc
		reiserfs_warning(th->t_super, "vs-5360",
				 "could not delete %K due to fix_nodes failure",
				 &cpu_key);
		unfix_nodes(&tb);
		break;
	}

	reiserfs_check_path(&path);
}

/* delete a whole object: truncate its body to zero, bump the inode
   generation counter (new-format fs only), then remove its stat data */
int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
			   struct inode *inode)
{
	int err;
	inode->i_size = 0;
	BUG_ON(!th->t_trans_id);

	/* for directory this deletes item containing "." and ".." */
	err =
	    reiserfs_do_truncate(th, inode, NULL, 0 /*no timestamp updates */ );
	if (err)
		return err;

#if defined( USE_INODE_GENERATION_COUNTER )
	if (!old_format_only(th->t_super)) {
		__le32 *inode_generation;

		inode_generation =
		    &REISERFS_SB(th->t_super)->s_rs->s_inode_generation;
		le32_add_cpu(inode_generation, 1);
	}
/* USE_INODE_GENERATION_COUNTER */
#endif
	reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode));

	return err;
}

/* unmap every buffer of the page that lies at or after byte offset
   pos (i.e. the tail of the file and everything following it) */
static void unmap_buffers(struct page *page, loff_t pos)
{
	struct buffer_head *bh;
	struct buffer_head *head;
	struct buffer_head *next;
	unsigned long tail_index;
	unsigned long cur_index;

	if (page) {
		if (page_has_buffers(page)) {
			tail_index = pos & (PAGE_CACHE_SIZE - 1);
			cur_index = 0;
			head = page_buffers(page);
			bh = head;
			do {
				next = bh->b_this_page;

				/* we want to unmap the buffers that contain the tail, and
				 ** all the buffers after it (since the tail must be at the
				 ** end of the file).  We don't want to unmap file data
				 ** before the tail, since it might be dirty and waiting to
				 ** reach disk
				 */
				cur_index += bh->b_size;
				if (cur_index > tail_index) {
					reiserfs_unmap_buffer(bh);
				}
				bh = next;
			} while (bh != head);
		}
	}
}

/* decide whether the indirect->direct conversion is worthwhile; either
   performs it via indirect2direct() or sets *mode to M_SKIP_BALANCING
   and returns the number of tail bytes left in the unformatted node */
static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
				    struct inode *inode,
				    struct page *page,
				    struct treepath *path,
				    const struct cpu_key *item_key,
				    loff_t new_file_size, char *mode)
{
	struct super_block *sb = inode->i_sb;
	int block_size = sb->s_blocksize;
	int cut_bytes;
	BUG_ON(!th->t_trans_id);
	BUG_ON(new_file_size != inode->i_size);

	/* the page being sent in could be NULL if there was an i/o error
	 ** reading in the last block.
The user will hit problems trying to
	 ** read the file, but for now we just skip the indirect2direct
	 */
	if (atomic_read(&inode->i_count) > 1 ||
	    !tail_has_to_be_packed(inode) ||
	    !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) {
		/* leave tail in an unformatted node */
		*mode = M_SKIP_BALANCING;
		cut_bytes =
		    block_size - (new_file_size & (block_size - 1));
		pathrelse(path);
		return cut_bytes;
	}
	/* Perform the conversion to a direct_item. */
	/* return indirect_to_direct(inode, path, item_key,
				  new_file_size, mode); */
	return indirect2direct(th, inode, page, path, item_key,
			       new_file_size, mode);
}

/* we did indirect_to_direct conversion. And we have inserted direct
   item successesfully, but there were no disk space to cut unfm
   pointer being converted. Therefore we have to delete inserted
   direct item(s) */
static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
					 struct inode *inode, struct treepath *path)
{
	struct cpu_key tail_key;
	int tail_len;
	int removed;
	BUG_ON(!th->t_trans_id);

	make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);	// !!!!
	tail_key.key_length = 4;

	/* tail length = distance of (i_size + 1) into its block, minus 1 */
	tail_len =
	    (cpu_key_k_offset(&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1;
	while (tail_len) {
		/* look for the last byte of the tail */
		if (search_for_position_by_key(inode->i_sb, &tail_key, path) ==
		    POSITION_NOT_FOUND)
			reiserfs_panic(inode->i_sb, "vs-5615",
				       "found invalid item");
		RFALSE(path->pos_in_item !=
		       ih_item_len(PATH_PITEM_HEAD(path)) - 1,
		       "vs-5616: appended bytes found");
		PATH_LAST_POSITION(path)--;

		removed =
		    reiserfs_delete_item(th, path, &tail_key, inode,
					 NULL /*unbh not needed */ );
		RFALSE(removed <= 0
		       || removed > tail_len,
		       "vs-5617: there was tail %d bytes, removed item length %d bytes",
		       tail_len, removed);
		tail_len -= removed;
		set_cpu_key_k_offset(&tail_key,
				     cpu_key_k_offset(&tail_key) - removed);
	}
	reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct "
			 "conversion has been rolled back due to "
			 "lack of disk space");
	//mark_file_without_tail (inode);
	mark_inode_dirty(inode);
}

/* (Truncate or cut entry) or delete object item. Returns < 0 on failure */
int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
			   struct treepath *path,
			   struct cpu_key *item_key,
			   struct inode *inode,
			   struct page *page, loff_t new_file_size)
{
	struct super_block *sb = inode->i_sb;
	/* Every function which is going to call do_balance must first
	   create a tree_balance structure.  Then it must fill up this
	   structure by using the init_tb_struct and fix_nodes functions.
	   After that we can make tree balancing. */
	struct tree_balance s_cut_balance;
	struct item_head *p_le_ih;
	int cut_size = 0,	/* Amount to be cut. */
	    ret_value = CARRY_ON, removed = 0,	/* Number of the removed unformatted nodes. */
	    is_inode_locked = 0;
	char mode;		/* Mode of the balance.
*/
	int retval2 = -1;
	int quota_cut_bytes;
	loff_t tail_pos = 0;

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_cut_balance, inode->i_sb, path, cut_size);

	/* Repeat this loop until we either cut the item without needing
	   to balance, or we fix_nodes without schedule occurring */
	while (1) {
		/* Determine the balance mode, position of the first byte to
		   be cut, and size to be cut.  In case of the indirect item
		   free unformatted nodes which are pointed to by the cut
		   pointers. */

		mode =
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &cut_size, new_file_size);
		if (mode == M_CONVERT) {
			/* convert last unformatted node to direct item or
			   leave tail in the unformatted node */
			RFALSE(ret_value != CARRY_ON,
			       "PAP-5570: can not convert twice");

			ret_value =
			    maybe_indirect_to_direct(th, inode, page,
						     path, item_key,
						     new_file_size, &mode);
			if (mode == M_SKIP_BALANCING)
				/* tail has been left in the unformatted node */
				return ret_value;

			is_inode_locked = 1;

			/* removing of last unformatted node will change value we
			   have to return to truncate. Save it */
			retval2 = ret_value;
			/*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */

			/* So, we have performed the first part of the conversion:
			   inserting the new direct item.  Now we are removing the
			   last unformatted node pointer. Set key to search for
			   it. */
			set_cpu_key_k_type(item_key, TYPE_INDIRECT);
			item_key->key_length = 4;
			new_file_size -=
			    (new_file_size & (sb->s_blocksize - 1));
			tail_pos = new_file_size;
			set_cpu_key_k_offset(item_key, new_file_size + 1);
			if (search_for_position_by_key
			    (sb, item_key,
			     path) == POSITION_NOT_FOUND) {
				print_block(PATH_PLAST_BUFFER(path), 3,
					    PATH_LAST_POSITION(path) - 1,
					    PATH_LAST_POSITION(path) + 1);
				reiserfs_panic(sb, "PAP-5580", "item to "
					       "convert does not exist (%K)",
					       item_key);
			}
			continue;
		}
		if (cut_size == 0) {
			/* nothing to cut at all */
			pathrelse(path);
			return 0;
		}

		s_cut_balance.insert_size[0] = cut_size;

		ret_value = fix_nodes(mode, &s_cut_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		PROC_INFO_INC(sb, cut_from_item_restarted);

		/* the tree changed while fix_nodes slept: redo the lookup */
		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == POSITION_FOUND)
			continue;

		reiserfs_warning(sb, "PAP-5610", "item %K not found",
				 item_key);
		unfix_nodes(&s_cut_balance);
		return (ret_value == IO_ERROR) ? -EIO : -ENOENT;
	}			/* while */

	// check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
	if (ret_value != CARRY_ON) {
		if (is_inode_locked) {
			// FIXME: this seems to be not needed: we are always able
			// to cut item
			indirect_to_direct_roll_back(th, inode, path);
		}
		if (ret_value == NO_DISK_SPACE)
			reiserfs_warning(sb, "reiserfs-5092",
					 "NO_DISK_SPACE");
		unfix_nodes(&s_cut_balance);
		return -EIO;
	}

	/* go ahead and perform balancing */

	RFALSE(mode == M_PASTE || mode == M_INSERT, "invalid mode");

	/* Calculate number of bytes that need to be cut from the item. */
	quota_cut_bytes =
	    (mode ==
	     M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance.
	    insert_size[0];
	if (retval2 == -1)
		ret_value = calc_deleted_bytes_number(&s_cut_balance, mode);
	else
		ret_value = retval2;

	/* For direct items, we only change the quota when deleting the last
	 ** item.
*/
	p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path);
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
		if (mode == M_DELETE &&
		    (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
		    1) {
			// FIXME: this is to keep 3.5 happy
			REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}
#ifdef CONFIG_REISERFS_CHECK
	if (is_inode_locked) {
		struct item_head *le_ih =
		    PATH_PITEM_HEAD(s_cut_balance.tb_path);
		/* we are going to complete indirect2direct conversion. Make
		   sure, that we exactly remove last unformatted node pointer
		   of the item */
		if (!is_indirect_le_ih(le_ih))
			reiserfs_panic(sb, "vs-5652",
				       "item must be indirect %h", le_ih);

		if (mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
			reiserfs_panic(sb, "vs-5653", "completing "
				       "indirect2direct conversion indirect "
				       "item %h being deleted must be of "
				       "4 byte long", le_ih);

		if (mode == M_CUT
		    && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
			reiserfs_panic(sb, "vs-5654", "can not complete "
				       "indirect2direct conversion of %h "
				       "(CUT, insert_size==%d)",
				       le_ih, s_cut_balance.insert_size[0]);
		}
		/* it would be useful to make sure, that right neighboring
		   item is direct item of this file */
	}
#endif

	do_balance(&s_cut_balance, NULL, NULL, mode);
	if (is_inode_locked) {
		/* we've done an indirect->direct conversion.  when the data block
		 ** was freed, it was removed from the list of blocks that must
		 ** be flushed before the transaction commits, make sure to
		 ** unmap and invalidate it */
		unmap_buffers(page, tail_pos);
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
	}
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota cut_from_item(): freeing %u id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, '?');
#endif
	dquot_free_space_nodirty(inode, quota_cut_bytes);
	return ret_value;
}

/* remove the whole body of an (unlinked) directory: the item holding
   "." and ".."; restores the key to the stat-data afterwards */
static void truncate_directory(struct reiserfs_transaction_handle *th,
			       struct inode *inode)
{
	BUG_ON(!th->t_trans_id);
	if (inode->i_nlink)
		reiserfs_error(inode->i_sb, "vs-5655", "link count != 0");

	set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), DOT_OFFSET);
	set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_DIRENTRY);
	reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode));
	reiserfs_update_sd(th, inode);
	set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), SD_OFFSET);
	set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_STAT_DATA);
}

/* Truncate file to the new size. Note, this must be called with a transaction
   already started */
int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
			 struct inode *inode,	/* ->i_size contains new size */
			 struct page *page,	/* up to date for last block */
			 int update_timestamps	/* when it is called by
						   file_release to convert
						   the tail - no timestamps
						   should be updated */
    )
{
	INITIALIZE_PATH(s_search_path);	/* Path to the current object item. */
	struct item_head *p_le_ih;	/* Pointer to an item header. */
	struct cpu_key s_item_key;	/* Key to search for a previous file item. */
	loff_t file_size,	/* Old file size. */
	 new_file_size;	/* New file size. */
	int deleted;		/* Number of deleted or truncated bytes. */
	int retval;
	int err = 0;

	BUG_ON(!th->t_trans_id);
	if (!
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
	     || S_ISLNK(inode->i_mode)))
		return 0;

	if (S_ISDIR(inode->i_mode)) {
		// deletion of directory - no need to update timestamps
		truncate_directory(th, inode);
		return 0;
	}

	/* Get new file size. */
	new_file_size = inode->i_size;

	// FIXME: note, that key type is unimportant here
	make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
		     TYPE_DIRECT, 3);

	retval =
	    search_for_position_by_key(inode->i_sb, &s_item_key,
				       &s_search_path);
	if (retval == IO_ERROR) {
		reiserfs_error(inode->i_sb, "vs-5657",
			       "i/o failure occurred trying to truncate %K",
			       &s_item_key);
		err = -EIO;
		goto out;
	}
	if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
		/* searching for the max offset must land past the last item
		   of the file; anything else means a corrupted tree */
		reiserfs_error(inode->i_sb, "PAP-5660",
			       "wrong result %d of search for %K", retval,
			       &s_item_key);

		err = -EIO;
		goto out;
	}

	s_search_path.pos_in_item--;

	/* Get real file size (total length of all file items) */
	p_le_ih = PATH_PITEM_HEAD(&s_search_path);
	if (is_statdata_le_ih(p_le_ih))
		file_size = 0;
	else {
		loff_t offset = le_ih_k_offset(p_le_ih);
		int bytes =
		    op_bytes_number(p_le_ih, inode->i_sb->s_blocksize);

		/* this may mismatch with real file size: if last direct item
		   had no padding zeros and last unformatted node had no free
		   space, this file would have this file size */
		file_size = offset + bytes - 1;
	}
	/*
	 * are we doing a full truncate or delete, if so
	 * kick in the reada code
	 */
	if (new_file_size == 0)
		s_search_path.reada = PATH_READA | PATH_READA_BACK;

	if (file_size == 0 || file_size < new_file_size) {
		goto update_and_out;
	}

	/* Update key to search for the last file item. */
	set_cpu_key_k_offset(&s_item_key, file_size);

	do {
		/* Cut or delete file item. */
		deleted =
		    reiserfs_cut_from_item(th, &s_search_path, &s_item_key,
					   inode, page, new_file_size);
		if (deleted < 0) {
			reiserfs_warning(inode->i_sb, "vs-5665",
					 "reiserfs_cut_from_item failed");
			reiserfs_check_path(&s_search_path);
			return 0;
		}

		RFALSE(deleted > file_size,
		       "PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K",
		       deleted, file_size, &s_item_key);

		/* Change key to search the last file item. */
		file_size -= deleted;

		set_cpu_key_k_offset(&s_item_key, file_size);

		/* While there are bytes to truncate and previous file item is presented in the tree. */

		/*
		 ** This loop could take a really long time, and could log
		 ** many more blocks than a transaction can hold.  So, we do a polite
		 ** journal end here, and if the transaction needs ending, we make
		 ** sure the file is consistent before ending the current trans
		 ** and starting a new one
		 */
		if (journal_transaction_should_end(th, 0) ||
		    reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
			int orig_len_alloc = th->t_blocks_allocated;
			pathrelse(&s_search_path);

			if (update_timestamps) {
				inode->i_mtime = CURRENT_TIME_SEC;
				inode->i_ctime = CURRENT_TIME_SEC;
			}
			reiserfs_update_sd(th, inode);

			err = journal_end(th, inode->i_sb, orig_len_alloc);
			if (err)
				goto out;
			err = journal_begin(th, inode->i_sb,
					    JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD + JOURNAL_PER_BALANCE_CNT * 4) ;
			if (err)
				goto out;
			reiserfs_update_inode_transaction(inode);
		}
	} while (file_size > ROUND_UP(new_file_size) &&
		 search_for_position_by_key(inode->i_sb, &s_item_key,
					    &s_search_path) == POSITION_FOUND);

	RFALSE(file_size > ROUND_UP(new_file_size),
	       "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d",
	       new_file_size, file_size, s_item_key.on_disk_key.k_objectid);

      update_and_out:
	if (update_timestamps) {
		// this is truncate, not file closing
		inode->i_mtime = CURRENT_TIME_SEC;
		inode->i_ctime = CURRENT_TIME_SEC;
	}
	reiserfs_update_sd(th, inode);

      out:
	pathrelse(&s_search_path);
	return
err;
}

#ifdef CONFIG_REISERFS_CHECK
// this makes sure, that we __append__, not overwrite or add holes
static void check_research_for_paste(struct treepath *path,
				     const struct cpu_key *key)
{
	struct item_head *found_ih = get_ih(path);

	if (is_direct_le_ih(found_ih)) {
		if (le_ih_k_offset(found_ih) +
		    op_bytes_number(found_ih,
				    get_last_bh(path)->b_size) !=
		    cpu_key_k_offset(key)
		    || op_bytes_number(found_ih,
				       get_last_bh(path)->b_size) !=
		    pos_in_item(path))
			reiserfs_panic(NULL, "PAP-5720", "found direct item "
				       "%h or position (%d) does not match "
				       "to key %K", found_ih,
				       pos_in_item(path), key);
	}
	if (is_indirect_le_ih(found_ih)) {
		if (le_ih_k_offset(found_ih) +
		    op_bytes_number(found_ih,
				    get_last_bh(path)->b_size) !=
		    cpu_key_k_offset(key)
		    || I_UNFM_NUM(found_ih) != pos_in_item(path)
		    || get_ih_free_space(found_ih) != 0)
			reiserfs_panic(NULL, "PAP-5730", "found indirect "
				       "item (%h) or position (%d) does not "
				       "match to key (%K)",
				       found_ih, pos_in_item(path), key);
	}
}
#endif				/* config reiserfs check */

/* Paste bytes to the existing item. Returns bytes number pasted into the item. */
int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th,
			     struct treepath *search_path,	/* Path to the pasted item. */
			     const struct cpu_key *key,	/* Key to search for the needed item. */
			     struct inode *inode,	/* Inode item belongs to */
			     const char *body,	/* Pointer to the bytes to paste. */
			     int pasted_size)
{				/* Size of pasted bytes. */
	struct tree_balance s_paste_balance;
	int retval;
	int fs_gen;

	BUG_ON(!th->t_trans_id);

	fs_gen = get_generation(inode->i_sb);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota paste_into_item(): allocating %u id=%u type=%c",
		       pasted_size, inode->i_uid,
		       key2type(&(key->on_disk_key)));
#endif

	/* reserve quota before touching the tree; released on error below */
	retval = dquot_alloc_space_nodirty(inode, pasted_size);
	if (retval) {
		pathrelse(search_path);
		return retval;
	}
	init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
		       pasted_size);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	s_paste_balance.key = key->on_disk_key;
#endif

	/* DQUOT_* can schedule, must check before the fix_nodes */
	if (fs_changed(fs_gen, inode->i_sb)) {
		goto search_again;
	}

	while ((retval =
		fix_nodes(M_PASTE, &s_paste_balance, NULL,
			  body)) == REPEAT_SEARCH) {
	      search_again:
		/* file system changed while we were in the fix_nodes */
		PROC_INFO_INC(th->t_super, paste_into_item_restarted);
		retval =
		    search_for_position_by_key(th->t_super, key,
					       search_path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto error_out;
		}
		if (retval == POSITION_FOUND) {
			reiserfs_warning(inode->i_sb, "PAP-5710",
					 "entry or pasted byte (%K) exists",
					 key);
			retval = -EEXIST;
			goto error_out;
		}
#ifdef CONFIG_REISERFS_CHECK
		check_research_for_paste(search_path, key);
#endif
	}

	/* Perform balancing after all resources are collected by fix_nodes, and
	   accessing them will not risk triggering schedule. */
	if (retval == CARRY_ON) {
		do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE);
		return 0;
	}
	retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
      error_out:
	/* this also releases the path */
	unfix_nodes(&s_paste_balance);
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota paste_into_item(): freeing %u id=%u type=%c",
		       pasted_size, inode->i_uid,
		       key2type(&(key->on_disk_key)));
#endif
	dquot_free_space_nodirty(inode, pasted_size);
	return retval;
}

/* Insert new item into the buffer at the path.
 * th - active transaction handle
 * path - path to the inserted item
 * ih - pointer to the item header to insert
 * body - pointer to the bytes to insert
 *
 * key   - key under which the item is inserted
 * inode - may be NULL; when non-NULL the insertion is charged against
 *         the inode's disk quota
 *
 * Returns 0 on success; -EIO on I/O error, -EEXIST if the key is
 * already present, -ENOSPC when fix_nodes reports no disk space, or
 * the error from dquot_alloc_space_nodirty(). The path is released on
 * every exit (pathrelse on quota failure, unfix_nodes on error,
 * do_balance on success).
 */
int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path, const struct cpu_key *key,
			 struct item_head *ih, struct inode *inode,
			 const char *body)
{
	struct tree_balance s_ins_balance;
	int retval;
	int fs_gen = 0;
	int quota_bytes = 0;

	BUG_ON(!th->t_trans_id);

	if (inode) {		/* Do we count quotas for item? */
		/* Snapshot the fs generation so we can tell whether the
		 * quota call below slept and the tree changed. */
		fs_gen = get_generation(inode->i_sb);
		quota_bytes = ih_item_len(ih);

		/* hack so the quota code doesn't have to guess if the file has
		 ** a tail, links are always tails, so there's no guessing needed
		 */
		if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih))
			quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE;
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
			       "reiserquota insert_item(): allocating %u id=%u type=%c",
			       quota_bytes, inode->i_uid, head2type(ih));
#endif
		/* We can't dirty inode here. It would be immediately written
		 * but appropriate stat item isn't inserted yet... */
		retval = dquot_alloc_space_nodirty(inode, quota_bytes);
		if (retval) {
			pathrelse(path);
			return retval;
		}
	}
	init_tb_struct(th, &s_ins_balance, th->t_super, path,
		       IH_SIZE + ih_item_len(ih));
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	s_ins_balance.key = key->on_disk_key;
#endif
	/* DQUOT_* can schedule, must check to be sure calling fix_nodes is
	 * safe */
	if (inode && fs_changed(fs_gen, inode->i_sb)) {
		goto search_again;
	}

	/* Note: the search_again label jumps INTO the loop body on the
	 * first pass when the fs changed above; on later passes the loop
	 * re-enters whenever fix_nodes asks for REPEAT_SEARCH. */
	while ((retval =
		fix_nodes(M_INSERT, &s_ins_balance, ih,
			  body)) == REPEAT_SEARCH) {
	      search_again:
		/* file system changed while we were in the fix_nodes */
		PROC_INFO_INC(th->t_super, insert_item_restarted);
		retval = search_item(th->t_super, key, path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto error_out;
		}
		if (retval == ITEM_FOUND) {
			reiserfs_warning(th->t_super, "PAP-5760",
					 "key %K already exists in the tree",
					 key);
			retval = -EEXIST;
			goto error_out;
		}
	}

	/* make balancing after all resources will be collected at a time */
	if (retval == CARRY_ON) {
		do_balance(&s_ins_balance, ih, body, M_INSERT);
		return 0;
	}

	retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
      error_out:
	/* also releases the path */
	unfix_nodes(&s_ins_balance);
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
		       "reiserquota insert_item(): freeing %u id=%u type=%c",
		       quota_bytes, inode->i_uid, head2type(ih));
#endif
	/* Undo the quota charge made above on any failure path. */
	if (inode)
		dquot_free_space_nodirty(inode, quota_bytes);
	return retval;
}
gpl-2.0